book follow-up

Tykayn 2025-08-30 18:14:14 +02:00 committed by tykayn
parent b4b4398bb0
commit 3a7a3849ae
12242 changed files with 2564461 additions and 6914 deletions


@@ -0,0 +1,228 @@
"""
========================================
Interpolation (:mod:`scipy.interpolate`)
========================================
.. currentmodule:: scipy.interpolate
Sub-package for functions and objects used in interpolation.
See the :ref:`user guide <tutorial-interpolate>` for recommendations on choosing a
routine, and other usage details.
Univariate interpolation
========================
.. autosummary::
:toctree: generated/
make_interp_spline
CubicSpline
PchipInterpolator
Akima1DInterpolator
FloaterHormannInterpolator
BarycentricInterpolator
KroghInterpolator
CubicHermiteSpline
**Low-level data structures for univariate interpolation:**
.. autosummary::
:toctree: generated/
PPoly
BPoly
BSpline
Multivariate interpolation
==========================
**Unstructured data**
.. autosummary::
:toctree: generated/
LinearNDInterpolator
NearestNDInterpolator
CloughTocher2DInterpolator
RBFInterpolator
**For data on a grid:**
.. autosummary::
:toctree: generated/
RegularGridInterpolator
.. seealso::
`scipy.ndimage.map_coordinates`,
:ref:`An example wrapper for map_coordinates <tutorial-interpolate_cartesian-grids>`
**Low-level data structures for tensor product polynomials and splines:**
.. autosummary::
:toctree: generated/
NdPPoly
NdBSpline
1-D spline smoothing and approximation
======================================
.. autosummary::
:toctree: generated/
make_lsq_spline
make_smoothing_spline
make_splrep
make_splprep
generate_knots
Rational Approximation
======================
.. autosummary::
:toctree: generated/
AAA
Interfaces to FITPACK routines for 1D and 2D spline fitting
===========================================================
This section lists wrappers for `FITPACK <http://www.netlib.org/dierckx/>`__
functionality for 1D and 2D smoothing splines. In most cases, users are better off
using higher-level routines listed in previous sections.
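For instance (an illustrative sketch; the smoothing value is arbitrary), the
modern `make_splrep` plays the role of the legacy `splrep`, returning a
`BSpline` object instead of a ``(t, c, k)`` tuple:
>>> import numpy as np
>>> from scipy.interpolate import make_splrep, splrep
>>> x = np.linspace(0, 2*np.pi, 50)
>>> y = np.sin(x)
>>> spl = make_splrep(x, y, s=1e-3)  # returns a BSpline object
>>> tck = splrep(x, y, s=1e-3)       # returns the legacy (t, c, k) representation
>>> type(spl).__name__, len(tck)
('BSpline', 3)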
1D FITPACK splines
------------------
This package provides two sets of functionally equivalent wrappers: object-oriented and
functional.
**Functional FITPACK interface:**
.. autosummary::
:toctree: generated/
splrep
splprep
splev
splint
sproot
spalde
splder
splantider
insert
**Object-oriented FITPACK interface:**
.. autosummary::
:toctree: generated/
UnivariateSpline
InterpolatedUnivariateSpline
LSQUnivariateSpline
2D FITPACK splines
------------------
**For data on a grid:**
.. autosummary::
:toctree: generated/
RectBivariateSpline
RectSphereBivariateSpline
**For unstructured data (OOP interface):**
.. autosummary::
:toctree: generated/
BivariateSpline
SmoothBivariateSpline
SmoothSphereBivariateSpline
LSQBivariateSpline
LSQSphereBivariateSpline
**For unstructured data (functional interface):**
.. autosummary::
:toctree: generated/
bisplrep
bisplev
Additional tools
================
.. autosummary::
:toctree: generated/
lagrange
approximate_taylor_polynomial
pade
interpn
griddata
barycentric_interpolate
krogh_interpolate
pchip_interpolate
Rbf
interp1d
interp2d
.. seealso::
`scipy.ndimage.map_coordinates`,
`scipy.ndimage.spline_filter`,
""" # noqa: E501
from ._interpolate import *
from ._fitpack_py import *
from ._fitpack2 import *
from ._rbf import Rbf
from ._rbfinterp import *
from ._polyint import *
from ._cubic import *
from ._ndgriddata import *
from ._bsplines import *
from ._fitpack_repro import generate_knots, make_splrep, make_splprep
from ._pade import *
from ._rgi import *
from ._ndbspline import NdBSpline
from ._bary_rational import *
# Deprecated namespaces, to be removed in v2.0.0
from . import fitpack, fitpack2, interpolate, ndgriddata, polyint, rbf, interpnd
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
# Backward compatibility
pchip = PchipInterpolator


@@ -0,0 +1,715 @@
# Copyright (c) 2017, The Chancellor, Masters and Scholars of the University
# of Oxford, and the Chebfun Developers. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of Oxford nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import operator
import numpy as np
import scipy
__all__ = ["AAA", "FloaterHormannInterpolator"]
class _BarycentricRational:
"""Base class for barycentric representation of a rational function."""
def __init__(self, x, y, **kwargs):
# input validation
z = np.asarray(x)
f = np.asarray(y)
self._input_validation(z, f, **kwargs)
# Remove infinite or NaN function values and repeated entries
to_keep = np.logical_and.reduce(
np.isfinite(f).reshape(f.shape[0], -1),  # isfinite already excludes NaN
axis=-1
)
f = f[to_keep, ...]
z = z[to_keep]
z, uni = np.unique(z, return_index=True)
f = f[uni, ...]
self._shape = f.shape[1:]
self._support_points, self._support_values, self.weights = (
self._compute_weights(z, f, **kwargs)
)
# only compute once
self._poles = None
self._residues = None
self._roots = None
def _input_validation(self, x, y, **kwargs):
if x.ndim != 1:
raise ValueError("`x` must be 1-D.")
if not y.ndim >= 1:
raise ValueError("`y` must be at least 1-D.")
if x.size != y.shape[0]:
raise ValueError("`x` be the same size as the first dimension of `y`.")
if not np.all(np.isfinite(x)):
raise ValueError("`x` must be finite.")
def _compute_weights(self, z, f, **kwargs):
raise NotImplementedError
def __call__(self, z):
"""Evaluate the rational approximation at given values.
Parameters
----------
z : array_like
Input values.
"""
# evaluate rational function in barycentric form.
z = np.asarray(z)
zv = np.ravel(z)
support_values = self._support_values.reshape(
(self._support_values.shape[0], -1)
)
weights = self.weights[..., np.newaxis]
# Cauchy matrix
# Ignore errors due to inf/inf at support points, these will be fixed later
with np.errstate(invalid="ignore", divide="ignore"):
CC = 1 / np.subtract.outer(zv, self._support_points)
# Vector of values
r = CC @ (weights * support_values) / (CC @ weights)
# Deal with input inf: `r(inf) = lim r(z) = sum(w*f) / sum(w)`
if np.any(np.isinf(zv)):
r[np.isinf(zv)] = (np.sum(weights * support_values, axis=0)
/ np.sum(weights))
# Deal with NaN
ii = np.nonzero(np.isnan(r))[0]
for jj in ii:
if np.isnan(zv[jj]) or not np.any(zv[jj] == self._support_points):
# r(NaN) = NaN is fine.
# The second case may happen if `r(zv[ii]) = 0/0` at some point.
pass
else:
# Clean up values `NaN = inf/inf` at support points.
# Find the corresponding node and set entry to correct value:
r[jj] = support_values[zv[jj] == self._support_points].squeeze()
return np.reshape(r, z.shape + self._shape)
def poles(self):
"""Compute the poles of the rational approximation.
Returns
-------
poles : array
Poles of the AAA approximation, repeated according to their multiplicity
but not in any specific order.
"""
if self._poles is None:
# Compute poles via generalized eigenvalue problem
m = self.weights.size
B = np.eye(m + 1, dtype=self.weights.dtype)
B[0, 0] = 0
E = np.zeros_like(B, dtype=np.result_type(self.weights,
self._support_points))
E[0, 1:] = self.weights
E[1:, 0] = 1
np.fill_diagonal(E[1:, 1:], self._support_points)
pol = scipy.linalg.eigvals(E, B)
self._poles = pol[np.isfinite(pol)]
return self._poles
def residues(self):
"""Compute the residues of the poles of the approximation.
Returns
-------
residues : array
Residues associated with the `poles` of the approximation
"""
if self._residues is None:
# Compute residues via formula for res of quotient of analytic functions
with np.errstate(divide="ignore", invalid="ignore"):
N = (1/(np.subtract.outer(self.poles(), self._support_points))) @ (
self._support_values * self.weights
)
Ddiff = (
-((1/np.subtract.outer(self.poles(), self._support_points))**2)
@ self.weights
)
self._residues = N / Ddiff
return self._residues
def roots(self):
"""Compute the zeros of the rational approximation.
Returns
-------
zeros : array
Zeros of the AAA approximation, repeated according to their multiplicity
but not in any specific order.
"""
if self._roots is None:
# Compute zeros via generalized eigenvalue problem
m = self.weights.size
B = np.eye(m + 1, dtype=self.weights.dtype)
B[0, 0] = 0
E = np.zeros_like(B, dtype=np.result_type(self.weights,
self._support_values,
self._support_points))
E[0, 1:] = self.weights * self._support_values
E[1:, 0] = 1
np.fill_diagonal(E[1:, 1:], self._support_points)
zer = scipy.linalg.eigvals(E, B)
self._roots = zer[np.isfinite(zer)]
return self._roots
class AAA(_BarycentricRational):
r"""
AAA real or complex rational approximation.
As described in [1]_, the AAA algorithm is a greedy algorithm for approximation by
rational functions on a real or complex set of points. The rational approximation is
represented in a barycentric form from which the roots (zeros), poles, and residues
can be computed.
Parameters
----------
x : 1D array_like, shape (n,)
1-D array containing values of the independent variable. Values may be real or
complex but must be finite.
y : 1D array_like, shape (n,)
Function values ``f(x)``. Infinite and NaN values of `y` and
corresponding values of `x` will be discarded.
rtol : float, optional
Relative tolerance, defaults to ``eps**0.75``. If a small subset of the
entries in `y` is much larger than the rest, the default tolerance may be too
loose. If the tolerance is too tight, then the approximation may contain
Froissart doublets or the algorithm may fail to converge entirely.
max_terms : int, optional
Maximum number of terms in the barycentric representation, defaults to ``100``.
Must be greater than or equal to one.
clean_up : bool, optional
Automatic removal of Froissart doublets, defaults to ``True``. See notes for
more details.
clean_up_tol : float, optional
Poles with residues less than this number times the geometric mean
of `y` times the minimum distance to `x` are deemed spurious by the
cleanup procedure; defaults to 1e-13. See notes for more details.
Attributes
----------
support_points : array
Support points of the approximation. These are a subset of the provided `x` at
which the approximation strictly interpolates `y`.
See notes for more details.
support_values : array
Value of the approximation at the `support_points`.
weights : array
Weights of the barycentric approximation.
errors : array
Error :math:`|f(z) - r(z)|_\infty` over `x` in the successive iterations
of AAA.
Warns
-----
RuntimeWarning
If `rtol` is not achieved in `max_terms` iterations.
See Also
--------
FloaterHormannInterpolator : Floater-Hormann barycentric rational interpolation.
pade : Padé approximation.
Notes
-----
At iteration :math:`m` (at which point there are :math:`m` terms in both the
numerator and denominator of the approximation), the
rational approximation in the AAA algorithm takes the barycentric form
.. math::
r(z) = n(z)/d(z) =
\frac{\sum_{j=1}^m\ w_j f_j / (z - z_j)}{\sum_{j=1}^m w_j / (z - z_j)},
where :math:`z_1,\dots,z_m` are real or complex support points selected from
`x`, :math:`f_1,\dots,f_m` are the corresponding real or complex data values
from `y`, and :math:`w_1,\dots,w_m` are real or complex weights.
Each iteration of the algorithm has two parts: the greedy selection of the next
support point and the computation of the weights. The first part of each iteration
is to select the next support point :math:`z_{m+1}` from the remaining
unselected `x`, such that the nonlinear residual
:math:`|f(z_{m+1}) - n(z_{m+1})/d(z_{m+1})|` is maximised. The algorithm terminates
when this maximum is less than ``rtol * np.linalg.norm(f, ord=np.inf)``. This means
the interpolation property is only satisfied up to a tolerance, except at the
support points, where the approximation exactly interpolates the supplied data.
In the second part of each iteration, the weights :math:`w_j` are selected to solve
the least-squares problem
.. math::
\text{minimise}_{w_j}|fd - n| \quad \text{subject to} \quad
\sum_{j=1}^{m+1} w_j = 1,
over the unselected elements of `x`.
One of the challenges of working with rational approximations is the presence of
Froissart doublets, which are either poles with vanishingly small residues or
pole-zero pairs that are close enough together to nearly cancel, see [2]_. The
greedy nature of the AAA algorithm means Froissart doublets are rare. However, if
`rtol` is set too tight then the approximation will stagnate and many Froissart
doublets will appear. Froissart doublets can usually be removed by removing support
points and then re-solving the least-squares problem. The support point :math:`z_j`
closest to the pole :math:`a` with residue :math:`\alpha` is removed if the
following is satisfied
.. math::
|\alpha| / |z_j - a| < \verb|clean_up_tol| \cdot \tilde{f},
where :math:`\tilde{f}` is the geometric mean of `support_values`.
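As a quick sanity check of this barycentric form, it can be evaluated directly
from the public attributes listed above (an illustrative sketch; the sample
function and points are arbitrary):
>>> import numpy as np
>>> from scipy.interpolate import AAA
>>> x = np.linspace(-1, 1, 21)
>>> r = AAA(x, np.exp(x))
>>> z = 0.123  # an evaluation point that is not a support point
>>> C = 1 / (z - r.support_points)
>>> manual = C @ (r.weights * r.support_values) / (C @ r.weights)
>>> np.allclose(manual, r(z))
True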
References
----------
.. [1] Y. Nakatsukasa, O. Sete, and L. N. Trefethen, "The AAA algorithm for
rational approximation", SIAM J. Sci. Comp. 40 (2018), A1494-A1522.
:doi:`10.1137/16M1106122`
.. [2] J. Gilewicz and M. Pindor, Pade approximants and noise: rational functions,
J. Comp. Appl. Math. 105 (1999), pp. 285-297.
:doi:`10.1016/S0377-0427(02)00674-X`
Examples
--------
Here we reproduce a number of the numerical examples from [1]_ as a demonstration
of the functionality offered by this method.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import AAA
>>> import warnings
For the first example we approximate the gamma function on ``[-3.5, 4.5]`` by
extrapolating from 100 samples in ``[-1.5, 1.5]``.
>>> from scipy.special import gamma
>>> sample_points = np.linspace(-1.5, 1.5, num=100)
>>> r = AAA(sample_points, gamma(sample_points))
>>> z = np.linspace(-3.5, 4.5, num=1000)
>>> fig, ax = plt.subplots()
>>> ax.plot(z, gamma(z), label="Gamma")
>>> ax.plot(sample_points, gamma(sample_points), label="Sample points")
>>> ax.plot(z, r(z).real, '--', label="AAA approximation")
>>> ax.set(xlabel="z", ylabel="r(z)", ylim=[-8, 8], xlim=[-3.5, 4.5])
>>> ax.legend()
>>> plt.show()
We can also view the poles of the rational approximation and their residues:
>>> order = np.argsort(r.poles())
>>> r.poles()[order]
array([-3.81591039e+00+0.j , -3.00269049e+00+0.j ,
-1.99999988e+00+0.j , -1.00000000e+00+0.j ,
5.85842812e-17+0.j , 4.77485458e+00-3.06919376j,
4.77485458e+00+3.06919376j, 5.29095868e+00-0.97373072j,
5.29095868e+00+0.97373072j])
>>> r.residues()[order]
array([ 0.03658074 +0.j , -0.16915426 -0.j ,
0.49999915 +0.j , -1. +0.j ,
1. +0.j , -0.81132013 -2.30193429j,
-0.81132013 +2.30193429j, 0.87326839+10.70148546j,
0.87326839-10.70148546j])
For the second example, we call `AAA` with a spiral of 1000 points that wind 7.5
times around the origin in the complex plane.
>>> z = np.exp(np.linspace(-0.5, 0.5 + 15j*np.pi, 1000))
>>> r = AAA(z, np.tan(np.pi*z/2), rtol=1e-13)
We see that AAA takes 12 steps to converge with the following errors:
>>> r.errors.size
12
>>> r.errors
array([2.49261500e+01, 4.28045609e+01, 1.71346935e+01, 8.65055336e-02,
1.27106444e-02, 9.90889874e-04, 5.86910543e-05, 1.28735561e-06,
3.57007424e-08, 6.37007837e-10, 1.67103357e-11, 1.17112299e-13])
We can also plot the computed poles:
>>> fig, ax = plt.subplots()
>>> ax.plot(z.real, z.imag, '.', markersize=2, label="Sample points")
>>> ax.plot(r.poles().real, r.poles().imag, '.', markersize=5,
... label="Computed poles")
>>> ax.set(xlim=[-3.5, 3.5], ylim=[-3.5, 3.5], aspect="equal")
>>> ax.legend()
>>> plt.show()
We now demonstrate the removal of Froissart doublets using the `clean_up` method,
with an example from [1]_. Here we approximate the function
:math:`f(z)=\log(2 + z^4)/(1 - 16z^4)` by sampling it at 1000 roots of unity. The
algorithm is run with ``rtol=0`` and ``clean_up=False`` to deliberately cause
Froissart doublets to appear.
>>> z = np.exp(1j*2*np.pi*np.linspace(0,1, num=1000))
>>> def f(z):
... return np.log(2 + z**4)/(1 - 16*z**4)
>>> with warnings.catch_warnings(): # filter convergence warning due to rtol=0
... warnings.simplefilter('ignore', RuntimeWarning)
... r = AAA(z, f(z), rtol=0, max_terms=50, clean_up=False)
>>> mask = np.abs(r.residues()) < 1e-13
>>> fig, axs = plt.subplots(ncols=2)
>>> axs[0].plot(r.poles().real[~mask], r.poles().imag[~mask], '.')
>>> axs[0].plot(r.poles().real[mask], r.poles().imag[mask], 'r.')
Now we call the `clean_up` method to remove Froissart doublets.
>>> with warnings.catch_warnings():
... warnings.simplefilter('ignore', RuntimeWarning)
... r.clean_up()
4 # may vary
>>> mask = np.abs(r.residues()) < 1e-13
>>> axs[1].plot(r.poles().real[~mask], r.poles().imag[~mask], '.')
>>> axs[1].plot(r.poles().real[mask], r.poles().imag[mask], 'r.')
>>> plt.show()
The left image shows the poles of the approximation computed with
``clean_up=False``, with poles whose residues are less than ``1e-13`` in absolute
value shown in red. The right image shows the poles after the `clean_up` method
has been called.
"""
def __init__(self, x, y, *, rtol=None, max_terms=100, clean_up=True,
clean_up_tol=1e-13):
super().__init__(x, y, rtol=rtol, max_terms=max_terms)
if clean_up:
self.clean_up(clean_up_tol)
def _input_validation(self, x, y, rtol=None, max_terms=100, clean_up=True,
clean_up_tol=1e-13):
max_terms = operator.index(max_terms)
if max_terms < 1:
raise ValueError("`max_terms` must be an integer value greater than or "
"equal to one.")
if y.ndim != 1:
raise ValueError("`y` must be 1-D.")
super()._input_validation(x, y)
@property
def support_points(self):
return self._support_points
@property
def support_values(self):
return self._support_values
def _compute_weights(self, z, f, rtol, max_terms):
# Initialization for AAA iteration
M = np.size(z)
mask = np.ones(M, dtype=np.bool_)
dtype = np.result_type(z, f, 1.0)
rtol = np.finfo(dtype).eps**0.75 if rtol is None else rtol
atol = rtol * np.linalg.norm(f, ord=np.inf)
zj = np.empty(max_terms, dtype=dtype)
fj = np.empty(max_terms, dtype=dtype)
# Cauchy matrix
C = np.empty((M, max_terms), dtype=dtype)
# Loewner matrix
A = np.empty((M, max_terms), dtype=dtype)
errors = np.empty(max_terms, dtype=A.real.dtype)
R = np.repeat(np.mean(f), M)
# AAA iteration
for m in range(max_terms):
# Greedy selection of the next support point
jj = np.argmax(np.abs(f[mask] - R[mask]))
# Update support points
zj[m] = z[mask][jj]
# Update data values
fj[m] = f[mask][jj]
# Next column of Cauchy matrix
# Ignore errors as we manually interpolate at support points
with np.errstate(divide="ignore", invalid="ignore"):
C[:, m] = 1 / (z - z[mask][jj])
# Update mask
mask[np.nonzero(mask)[0][jj]] = False
# Update Loewner matrix
# Ignore errors as inf values will be masked out in SVD call
with np.errstate(invalid="ignore"):
A[:, m] = (f - fj[m]) * C[:, m]
# Compute weights
rows = mask.sum()
if rows >= m + 1:
# The usual tall-skinny case
_, s, V = scipy.linalg.svd(
A[mask, : m + 1], full_matrices=False, check_finite=False,
)
# Treat case of multiple min singular values
mm = s == np.min(s)
# Aim for non-sparse weight vector
wj = (V.conj()[mm, :].sum(axis=0) / np.sqrt(mm.sum())).astype(dtype)
else:
# Fewer rows than columns
V = scipy.linalg.null_space(A[mask, : m + 1], check_finite=False)
nm = V.shape[-1]
# Aim for non-sparse weight vector
wj = V.sum(axis=-1) / np.sqrt(nm)
# Compute rational approximant
# Omit columns with `wj == 0`
i0 = wj != 0
# Ignore errors as we manually interpolate at support points
with np.errstate(invalid="ignore"):
# Numerator
N = C[:, : m + 1][:, i0] @ (wj[i0] * fj[: m + 1][i0])
# Denominator
D = C[:, : m + 1][:, i0] @ wj[i0]
# Interpolate at support points with `wj != 0`
D_inf = np.isinf(D) | np.isnan(D)
D[D_inf] = 1
N[D_inf] = f[D_inf]
R = N / D
# Check if converged
max_error = np.linalg.norm(f - R, ord=np.inf)
errors[m] = max_error
if max_error <= atol:
break
if m == max_terms - 1:
warnings.warn(f"AAA failed to converge within {max_terms} iterations.",
RuntimeWarning, stacklevel=2)
# Trim off unused array allocation
zj = zj[: m + 1]
fj = fj[: m + 1]
# Remove support points with zero weight
i_non_zero = wj != 0
self.errors = errors[: m + 1]
self._points = z
self._values = f
return zj[i_non_zero], fj[i_non_zero], wj[i_non_zero]
def clean_up(self, cleanup_tol=1e-13):
"""Automatic removal of Froissart doublets.
Parameters
----------
cleanup_tol : float, optional
Poles with residues less than this number times the geometric mean
of the data values times the minimum distance to the data points are
deemed spurious by the cleanup procedure; defaults to 1e-13.
Returns
-------
int
Number of Froissart doublets detected
"""
# Find negligible residues
geom_mean_abs_f = scipy.stats.gmean(np.abs(self._values))
Z_distances = np.min(
np.abs(np.subtract.outer(self.poles(), self._points)), axis=1
)
with np.errstate(divide="ignore", invalid="ignore"):
ii = np.nonzero(
np.abs(self.residues()) / Z_distances < cleanup_tol * geom_mean_abs_f
)
ni = ii[0].size
if ni == 0:
return ni
warnings.warn(f"{ni} Froissart doublets detected.", RuntimeWarning,
stacklevel=2)
# For each spurious pole find and remove closest support point
closest_spt_point = np.argmin(
np.abs(np.subtract.outer(self._support_points, self.poles()[ii])), axis=0
)
self._support_points = np.delete(self._support_points, closest_spt_point)
self._support_values = np.delete(self._support_values, closest_spt_point)
# Remove support points z from sample set
mask = np.logical_and.reduce(
np.not_equal.outer(self._points, self._support_points), axis=1
)
f = self._values[mask]
z = self._points[mask]
# Recompute weights: re-solve the least-squares problem for the remaining
# support points
m = self._support_points.size
# Cauchy matrix
C = 1 / np.subtract.outer(z, self._support_points)
# Loewner matrix
A = f[:, np.newaxis] * C - C * self._support_values
# Solve least-squares problem to obtain weights
_, _, V = scipy.linalg.svd(A, check_finite=False)
self.weights = np.conj(V[m - 1,:])
# reset roots, poles, residues as cached values will be wrong with new weights
self._poles = None
self._residues = None
self._roots = None
return ni
class FloaterHormannInterpolator(_BarycentricRational):
r"""Floater-Hormann barycentric rational interpolator (C∞ smooth on real axis).
As described in [1]_, the method of Floater and Hormann computes weights for a
barycentric rational interpolant with no poles on the real axis.
Parameters
----------
x : 1D array_like, shape (n,)
1-D array containing values of the independent variable. Values may be real or
complex but must be finite.
y : array_like, shape (n, ...)
Array containing values of the dependent variable. Infinite and NaN values
of `y` and corresponding values of `x` will be discarded.
d : int, default: 3
Integer satisfying ``0 <= d < n``. Floater-Hormann interpolation blends
``n - d`` polynomials of degree `d` together; for ``d = n - 1``, this is
equivalent to polynomial interpolation.
Attributes
----------
weights : array
Weights of the barycentric approximation.
See Also
--------
AAA : Barycentric rational approximation of real and complex functions.
pade : Padé approximation.
Notes
-----
The Floater-Hormann interpolant is a rational function that interpolates the data
with approximation order :math:`O(h^{d+1})`. The rational function blends ``n - d``
polynomials of degree `d` together to produce a rational interpolant that contains
no poles on the real axis, unlike `AAA`. The interpolant is given
by
.. math::
r(x) = \frac{\sum_{i=0}^{n-d} \lambda_i(x) p_i(x)}
{\sum_{i=0}^{n-d} \lambda_i(x)},
where :math:`p_i(x)` is an interpolating polynomial of at most degree `d` through
the points :math:`(x_i,y_i),\dots,(x_{i+d},y_{i+d})`, and :math:`\lambda_i(z)` are
blending functions defined by
.. math::
\lambda_i(x) = \frac{(-1)^i}{(x - x_i)\cdots(x - x_{i+d})}.
When ``d = n - 1`` this reduces to polynomial interpolation.
Due to its stability, the following barycentric representation of the above equation
is used for computation
.. math::
r(x) = \frac{\sum_{k=1}^m\ w_k f_k / (x - x_k)}{\sum_{k=1}^m w_k / (x - x_k)},
where the weights :math:`w_j` are computed as
.. math::
w_k &= (-1)^{k - d} \sum_{i \in J_k} \prod_{j = i, j \neq k}^{i + d}
1/|x_k - x_j|, \\
J_k &= \{ i \in I: k - d \leq i \leq k\},\\
I &= \{0, 1, \dots, n - d\}.
References
----------
.. [1] M.S. Floater and K. Hormann, "Barycentric rational interpolation with no
poles and high rates of approximation", Numer. Math. 107, 315 (2007).
:doi:`10.1007/s00211-007-0093-y`
Examples
--------
Here we compare the method against polynomial interpolation for an example where
the polynomial interpolation fails due to Runge's phenomenon.
>>> import numpy as np
>>> from scipy.interpolate import (FloaterHormannInterpolator,
... BarycentricInterpolator)
>>> def f(x):
... return 1/(1 + x**2)
>>> x = np.linspace(-5, 5, num=15)
>>> r = FloaterHormannInterpolator(x, f(x))
>>> p = BarycentricInterpolator(x, f(x))
>>> xx = np.linspace(-5, 5, num=1000)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(xx, f(xx), label="f(x)")
>>> ax.plot(xx, r(xx), "--", label="Floater-Hormann")
>>> ax.plot(xx, p(xx), "--", label="Polynomial")
>>> ax.legend()
>>> plt.show()
"""
def __init__(self, points, values, *, d=3):
super().__init__(points, values, d=d)
def _input_validation(self, x, y, d):
d = operator.index(d)
if not (0 <= d < len(x)):
raise ValueError("`d` must satisfy 0 <= d < n")
super()._input_validation(x, y)
def _compute_weights(self, z, f, d):
# Weights from Floater and Hormann (2007), Eqn. (18) and the three
# equations that follow it
w = np.zeros_like(z, dtype=np.result_type(z, 1.0))
n = w.size
for k in range(n):
for i in range(max(k-d, 0), min(k+1, n-d)):
w[k] += 1/np.prod(np.abs(np.delete(z[k] - z[i : i + d + 1], k - i)))
w *= (-1.)**(np.arange(n) - d)
return z, f, w

File diff suppressed because it is too large.


@@ -0,0 +1,973 @@
"""Interpolation algorithms using piecewise cubic polynomials."""
from typing import Literal
import numpy as np
from scipy.linalg import solve, solve_banded
from . import PPoly
from ._polyint import _isscalar
__all__ = ["CubicHermiteSpline", "PchipInterpolator", "pchip_interpolate",
"Akima1DInterpolator", "CubicSpline"]
def prepare_input(x, y, axis, dydx=None):
"""Prepare input for cubic spline interpolators.
All data are converted to numpy arrays and checked for correctness.
Axes equal to `axis` of arrays `y` and `dydx` are moved to be the 0th
axis. The value of `axis` is converted to lie in
[0, number of dimensions of `y`).
"""
x, y = map(np.asarray, (x, y))
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("`x` must contain real values.")
x = x.astype(float)
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
if dydx is not None:
dydx = np.asarray(dydx)
if y.shape != dydx.shape:
raise ValueError("The shapes of `y` and `dydx` must be identical.")
if np.issubdtype(dydx.dtype, np.complexfloating):
dtype = complex
dydx = dydx.astype(dtype, copy=False)
y = y.astype(dtype, copy=False)
axis = axis % y.ndim
if x.ndim != 1:
raise ValueError("`x` must be 1-dimensional.")
if x.shape[0] < 2:
raise ValueError("`x` must contain at least 2 elements.")
if x.shape[0] != y.shape[axis]:
raise ValueError(f"The length of `y` along `axis`={axis} doesn't "
"match the length of `x`")
if not np.all(np.isfinite(x)):
raise ValueError("`x` must contain only finite values.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
if dydx is not None and not np.all(np.isfinite(dydx)):
raise ValueError("`dydx` must contain only finite values.")
dx = np.diff(x)
if np.any(dx <= 0):
raise ValueError("`x` must be strictly increasing sequence.")
y = np.moveaxis(y, axis, 0)
if dydx is not None:
dydx = np.moveaxis(dydx, axis, 0)
return x, dx, y, axis, dydx
class CubicHermiteSpline(PPoly):
"""Piecewise cubic interpolator to fit values and first derivatives (C1 smooth).
The result is represented as a `PPoly` instance.
Parameters
----------
x : array_like, shape (n,)
1-D array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along ``axis``
(see below) must match the length of ``x``. Values must be finite.
dydx : array_like
Array containing derivatives of the dependent variable. It can have
arbitrary number of dimensions, but the length along ``axis``
(see below) must match the length of ``x``. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), it is set to True.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same ``x`` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding ``axis``.
For example, if `y` is 1-D, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same axis which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
See Also
--------
Akima1DInterpolator : Akima 1D interpolator.
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
CubicSpline : Cubic spline data interpolator.
PPoly : Piecewise polynomial in terms of coefficients and breakpoints
Notes
-----
If you want to create a higher-order spline matching higher-order
derivatives, use `BPoly.from_derivatives`.
References
----------
.. [1] `Cubic Hermite spline
<https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_
on Wikipedia.
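A minimal usage sketch (assumed data, for illustration only): cubic Hermite
interpolation reproduces a cubic polynomial exactly when exact values and
first derivatives are supplied.
>>> import numpy as np
>>> from scipy.interpolate import CubicHermiteSpline
>>> x = np.array([0.0, 1.0, 2.0])
>>> spl = CubicHermiteSpline(x, x**3, 3 * x**2)
>>> np.allclose(spl(1.5), 1.5**3)
True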
"""
def __init__(self, x, y, dydx, axis=0, extrapolate=None):
if extrapolate is None:
extrapolate = True
x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr
c = np.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype)
c[0] = t / dxr
c[1] = (slope - dydx[:-1]) / dxr - t
c[2] = dydx[:-1]
c[3] = y[:-1]
super().__init__(c, x, extrapolate=extrapolate)
self.axis = axis
class PchipInterpolator(CubicHermiteSpline):
r"""PCHIP shape-preserving interpolator (C1 smooth).
``x`` and ``y`` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray, shape (npoints, )
A 1-D array of monotonically increasing real values. ``x`` cannot
include duplicate values (otherwise f is overspecified)
y : ndarray, shape (..., npoints, ...)
A N-D array of real values. ``y``'s length along the interpolation
axis must be equal to the length of ``x``. Use the ``axis``
parameter to select the interpolation axis.
axis : int, optional
Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
to ``axis=0``.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
See Also
--------
CubicHermiteSpline : Piecewise-cubic interpolator.
Akima1DInterpolator : Akima 1D interpolator.
CubicSpline : Cubic spline data interpolator.
PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
by using PCHIP algorithm [1]_.
Let :math:`h_k = x_{k+1} - x_k` and let :math:`d_k = (y_{k+1} - y_k) / h_k`
be the slopes at internal points :math:`x_k`.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and J. Butland,
A method for constructing local
monotone piecewise cubic interpolants,
SIAM J. Sci. Comput., 5(2), 300-304 (1984).
:doi:`10.1137/0905021`.
.. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
:doi:`10.1137/1.9780898717952`
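A minimal sketch of the shape preservation described above (the step data
are an arbitrary example): for monotone data the interpolant is monotone.
>>> import numpy as np
>>> from scipy.interpolate import PchipInterpolator
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
>>> y = np.array([0.0, 0.0, 1.0, 1.0, 1.0])
>>> p = PchipInterpolator(x, y)
>>> xs = np.linspace(0, 4, 101)
>>> bool(np.all(np.diff(p(xs)) >= 0))
True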
"""
def __init__(self, x, y, axis=0, extrapolate=None):
x, _, y, axis, _ = prepare_input(x, y, axis)
if np.iscomplexobj(y):
msg = ("`PchipInterpolator` only works with real values for `y`. "
"If you are trying to use the real components of the passed array, "
"use `np.real` on the array before passing to `PchipInterpolator`.")
raise ValueError(msg)
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
dk = self._find_derivatives(xp, y)
super().__init__(x, y, dk, axis=0, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _edge_case(h0, h1, m0, m1):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = np.sign(d) != np.sign(m0)
mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y):
# Determine the derivatives d_k at the points x_k by using the
# PCHIP algorithm:
# Let m_k be the slope of the kth segment (between k and k+1).
# If m_k = 0 or m_{k-1} = 0 or sgn(m_k) != sgn(m_{k-1}), then d_k == 0;
# else use the weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_{k-1} + w_2 / m_k)
# where h_k is the spacing between x_k and x_{k+1}
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = np.zeros_like(y)
dk[0] = mk
dk[1] = mk
return dk.reshape(y_shape)
smk = np.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore', invalid='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.6 (pchiptx.m)
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `scipy.interpolate.PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. For an N-D array, use the
`axis` parameter to select the correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Returns
-------
y : scalar or array_like
The result: an array of length M if `der` is a single integer, or a
list of R such arrays if `der` is a list of length R.
See Also
--------
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
Examples
--------
We can interpolate 2D observed data using pchip interpolation:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import pchip_interpolate
>>> x_observed = np.linspace(0.0, 10.0, 11)
>>> y_observed = np.sin(x_observed)
>>> x = np.linspace(min(x_observed), max(x_observed), num=100)
>>> y = pchip_interpolate(x_observed, y_observed, x)
>>> plt.plot(x_observed, y_observed, "o", label="observation")
>>> plt.plot(x, y, label="pchip interpolation")
>>> plt.legend()
>>> plt.show()
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
class Akima1DInterpolator(CubicHermiteSpline):
r"""Akima "visually pleasing" interpolator (C1 smooth).
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (npoints, )
1-D array of monotonically increasing real values.
y : ndarray, shape (..., npoints, ...)
N-D array of real values. The length of ``y`` along the interpolation axis
must be equal to the length of ``x``. Use the ``axis`` parameter to
select the interpolation axis.
axis : int, optional
Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
to ``axis=0``.
method : {'akima', 'makima'}, optional
If ``"makima"``, use the modified Akima interpolation [2]_.
Defaults to ``"akima"``, use the Akima interpolation [1]_.
.. versionadded:: 1.13.0
extrapolate : {bool, None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If None,
``extrapolate`` is set to False.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
See Also
--------
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
CubicSpline : Cubic spline data interpolator.
PPoly : Piecewise polynomial in terms of coefficients and breakpoints
Notes
-----
.. versionadded:: 0.14
Use only for precise data, as the fitted curve passes through the given
points exactly. This routine is useful for drawing a pleasingly smooth
curve through a few given points.
Let :math:`\delta_i = (y_{i+1} - y_i) / (x_{i+1} - x_i)` be the slopes of
the interval :math:`\left[x_i, x_{i+1}\right)`. Akima's derivative at
:math:`x_i` is defined as:
.. math::
d_i = \frac{w_1}{w_1 + w_2}\delta_{i-1} + \frac{w_2}{w_1 + w_2}\delta_i
In the Akima interpolation [1]_ (``method="akima"``), the weights are:
.. math::
\begin{aligned}
w_1 &= |\delta_{i+1} - \delta_i| \\
w_2 &= |\delta_{i-1} - \delta_{i-2}|
\end{aligned}
In the modified Akima interpolation [2]_ (``method="makima"``),
to eliminate overshoot and avoid edge cases of both numerator and
denominator being equal to 0, the weights are modified as follows:
.. math::
\begin{align*}
w_1 &= |\delta_{i+1} - \delta_i| + |\delta_{i+1} + \delta_i| / 2 \\
w_2 &= |\delta_{i-1} - \delta_{i-2}| + |\delta_{i-1} + \delta_{i-2}| / 2
\end{align*}
Examples
--------
Comparison of ``method="akima"`` and ``method="makima"``:
>>> import numpy as np
>>> from scipy.interpolate import Akima1DInterpolator
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(1, 7, 7)
>>> y = np.array([-1, -1, -1, 0, 1, 1, 1])
>>> xs = np.linspace(min(x), max(x), num=100)
>>> y_akima = Akima1DInterpolator(x, y, method="akima")(xs)
>>> y_makima = Akima1DInterpolator(x, y, method="makima")(xs)
>>> fig, ax = plt.subplots()
>>> ax.plot(x, y, "o", label="data")
>>> ax.plot(xs, y_akima, label="akima")
>>> ax.plot(xs, y_makima, label="makima")
>>> ax.legend()
>>> fig.show()
The overshoot that occurred in ``"akima"`` has been avoided in ``"makima"``.
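This can also be checked numerically (a sketch; the exact overshoot margin
may vary):
>>> bool(np.max(y_akima) > 1.0), bool(np.max(y_makima) > 1.0)
(True, False)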
References
----------
.. [1] A new method of interpolation and smooth curve fitting based
on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
589-602. :doi:`10.1145/321607.321609`
.. [2] Makima Piecewise Cubic Interpolation. Cleve Moler and Cosmin Ionita, 2019.
https://blogs.mathworks.com/cleve/2019/04/29/makima-piecewise-cubic-interpolation/
"""
def __init__(self, x, y, axis=0, *, method: Literal["akima", "makima"] = "akima",
extrapolate: bool | None = None):
if method not in {"akima", "makima"}:
raise NotImplementedError(f"`method`={method} is unsupported.")
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
x, dx, y, axis, _ = prepare_input(x, y, axis)
if np.iscomplexobj(y):
msg = ("`Akima1DInterpolator` only works with real values for `y`. "
"If you are trying to use the real components of the passed array, "
"use `np.real` on the array before passing to "
"`Akima1DInterpolator`.")
raise ValueError(msg)
# Akima extrapolation historically False; parent class defaults to True.
extrapolate = False if extrapolate is None else extrapolate
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
hk = xp[1:] - xp[:-1]
mk = (y[1:] - y[:-1]) / hk
t = np.zeros_like(y)
t[...] = mk
else:
# determine slopes between breakpoints
m = np.empty((x.size + 3, ) + y.shape[1:])
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2] = np.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1] = 2. * m[2] - m[3]
m[0] = 2. * m[1] - m[2]
# ... and on the right
m[-2] = 2. * m[-3] - m[-4]
m[-1] = 2. * m[-2] - m[-3]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not
# defined. This is the fill value:
t = .5 * (m[3:] + m[:-3])
# get the denominator of the slope t
dm = np.abs(np.diff(m, axis=0))
if method == "makima":
pm = np.abs(m[1:] + m[:-1])
f1 = dm[2:] + 0.5 * pm[2:]
f2 = dm[:-2] + 0.5 * pm[:-2]
else:
f1 = dm[2:]
f2 = dm[:-2]
f12 = f1 + f2
# These are the mask of where the slope at breakpoint is defined:
ind = np.nonzero(f12 > 1e-9 * np.max(f12, initial=-np.inf))
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
super().__init__(x, y, t, axis=0, extrapolate=extrapolate)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1-D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
class CubicSpline(CubicHermiteSpline):
"""Piecewise cubic interpolator to fit values (C2 smooth).
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-D array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along ``axis``
(see below) must match the length of ``x``. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
* 'periodic': The interpolated function is assumed to be periodic
with period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
* 'clamped': The first derivatives at the curve ends are zero. Assuming
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
* 'natural': The second derivatives at the curve ends are zero. Assuming
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
tuple ``(order, deriv_values)``, which allows specifying arbitrary
derivatives at the curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding ``axis`` dimension. For example, if
`y` is 1-D, then `deriv_value` must be a scalar. If `y` is 3-D with
the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2-D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), ``extrapolate`` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same ``x`` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding ``axis``.
For example, if `y` is 1-d, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same axis which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
See Also
--------
Akima1DInterpolator : Akima 1D interpolator.
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
Notes
-----
Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
former controls only construction of a spline, and the latter only
evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
When 'not-a-knot' boundary conditions are applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but those two methods use a
representation in the B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
You can see that the spline continuity property holds for the first and
second derivatives, and is violated only for the third derivative.
>>> import numpy as np
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> fig, ax = plt.subplots(figsize=(6.5, 4))
>>> ax.plot(x, y, 'o', label='data')
>>> ax.plot(xs, np.sin(xs), label='true')
>>> ax.plot(xs, cs(xs), label="S")
>>> ax.plot(xs, cs(xs, 1), label="S'")
>>> ax.plot(xs, cs(xs, 2), label="S''")
>>> ax.plot(xs, cs(xs, 3), label="S'''")
>>> ax.set_xlim(-0.5, 9.5)
>>> ax.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> fig, ax = plt.subplots(figsize=(6.5, 4))
>>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> ax.plot(np.cos(xs), np.sin(xs), label='true')
>>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> ax.axes.set_aspect('equal')
>>> ax.legend(loc='center')
>>> plt.show()
The third example is the interpolation of the polynomial y = x**3 on the
interval 0 <= x <= 1. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
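As a check of the 'not-a-knot' note above: with ``n = 3`` and default
boundary conditions the spline is a single parabola, so the cubic
coefficients vanish (an illustrative sketch with arbitrary data):
>>> cs = CubicSpline([0, 1, 2], [1, 3, 9])
>>> np.allclose(cs.c[0], 0)
True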
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
x, dx, y, axis, _ = prepare_input(x, y, axis)
n = len(x)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
if y.size == 0:
# bail out early for zero-sized arrays
s = np.zeros_like(y)
else:
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
# and the spline is just a constant, we handle this case in the
# same way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
# This is a special case, when both conditions are 'not-a-knot'
# and n == 3. In this case 'not-a-knot' can't be handled regularly
# as both conditions are identical. We handle this case by
# constructing a parabola passing through given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
A = np.zeros((3, 3)) # This is a standard matrix.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
m = b.shape[0]
s = solve(A, b.reshape(m, -1), overwrite_a=True, overwrite_b=True,
check_finite=False).reshape(b.shape)
elif n == 3 and bc[0] == 'periodic':
# When the number of points is 3 we compute the derivatives
# manually
t = (slope / dxr).sum(0) / (1. / dxr).sum(0)
s = np.broadcast_to(t, (n,) + y.shape[1:])
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
# (x[i-1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the
# linear system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
# for more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-3]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
m = b1.shape[0]
s1 = solve_banded((1, 1), Ac, b1.reshape(m, -1), overwrite_ab=False,
overwrite_b=False, check_finite=False)
s1 = s1.reshape(b1.shape)
m = b2.shape[0]
s2 = solve_banded((1, 1), Ac, b2.reshape(m, -1), overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = s2.reshape(b2.shape)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
m = b.shape[0]
s = solve_banded((1, 1), A, b.reshape(m, -1), overwrite_ab=True,
overwrite_b=True, check_finite=False)
s = s.reshape(b.shape)
super().__init__(x, y, s, axis=0, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
y cast to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, str):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
f"The first and last `y` point along axis {axis} must "
"be identical (within machine precision) when "
"bc_type='periodic'.")
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, str):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError(f"bc_type={bc} is not allowed.")
else:
try:
deriv_order, deriv_value = bc
except Exception as e:
raise ValueError(
"A specified derivative value must be "
"given in the form (order, value)."
) from e
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
f"`deriv_value` shape {deriv_value.shape} is not "
f"the expected one {expected_deriv_shape}."
)
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
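# A hedged usage sketch of the validation above (illustrative only, not part
# of the upstream file): bc_type='periodic' requires matching endpoint values,
# and the resulting spline then matches end derivatives by construction:
#
#     >>> import numpy as np
#     >>> from scipy.interpolate import CubicSpline
#     >>> x = np.linspace(0, 2 * np.pi, 10)
#     >>> spl = CubicSpline(x, np.sin(x), bc_type='periodic')
#     >>> bool(np.isclose(spl(x[0], 1), spl(x[-1], 1)))  # equal end slopes
#     True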

File diff suppressed because it is too large

@ -0,0 +1,811 @@
"""
fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
FITPACK is a collection of FORTRAN programs for curve and surface
fitting with splines and tensor product splines.
See
https://web.archive.org/web/20010524124604/http://www.cs.kuleuven.ac.be:80/cwis/research/nalag/research/topics/fitpack.html
or
http://www.netlib.org/dierckx/
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
TODO: Make interfaces to the following fitpack functions:
For univariate splines: cocosp, concon, fourco, insert
For bivariate splines: profil, regrid, parsur, surev
"""
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from . import _fitpack
from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
empty, iinfo, asarray)
# Try to replace _fitpack interface with
# f2py-generated version
from . import _dfitpack as dfitpack
dfitpack_int = dfitpack.types.intvar.dtype
def _int_overflow(x, exception, msg=None):
"""Cast the value to an dfitpack_int and raise an OverflowError if the value
cannot fit.
"""
if x > iinfo(dfitpack_int).max:
if msg is None:
msg = f'{x!r} cannot fit into an {dfitpack_int!r}'
raise exception(msg)
return dfitpack_int.type(x)
_iermess = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree k.\n"
"fp gives the upper bound fp0 for the smoothing factor s", None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: data (x,y) size is too small or smoothing parameter"
"\ns is too small (fp>s).", ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
10: ["Error on input data", ValueError],
'unknown': ["An error occurred", TypeError]
}
_iermess2 = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree kx and ky."
"\nfp gives the upper bound fp0 for the smoothing factor s", None],
-3: ["Warning. The coefficients of the spline have been computed as the\n"
"minimal norm least-squares solution of a rank deficient system.",
None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: nxest or nyest too small or s is too small. (fp>s)",
ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable causes: s too small or badly chosen eps.\n"
"(abs(fp-s)/s>0.001)", ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
4: ["No more knots can be added because the number of B-spline\n"
"coefficients already exceeds the number of data points m.\n"
"Probable causes: either s or m too small. (fp>s)", ValueError],
5: ["No more knots can be added because the additional knot would\n"
"coincide with an old one. Probable cause: s too small or too large\n"
"a weight to an inaccurate data point. (fp>s)", ValueError],
10: ["Error on input data", ValueError],
11: ["rwrk2 too small, i.e., there is not enough workspace for computing\n"
"the minimal least-squares solution of a rank deficient system of\n"
"linear equations.", ValueError],
'unknown': ["An error occurred", TypeError]
}
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], dfitpack_int), 'u': array([], float),
'ub': 0, 'ue': 1}
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
# see the docstring of `_fitpack_py/splprep`
if task <= 0:
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], dfitpack_int), 'u': array([], float),
'ub': 0, 'ue': 1}
x = atleast_1d(x)
idim, m = x.shape
if per:
for i in range(idim):
if x[i][0] != x[i][-1]:
if not quiet:
warnings.warn(
RuntimeWarning(f'Setting x[{i}][{m-1}]=x[{i}][0]'),
stacklevel=2
)
x[i][-1] = x[i][0]
if not 0 < idim < 11:
raise TypeError('0 < idim < 11 must hold')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
ipar = (u is not None)
if ipar:
_parcur_cache['u'] = u
if ub is None:
_parcur_cache['ub'] = u[0]
else:
_parcur_cache['ub'] = ub
if ue is None:
_parcur_cache['ue'] = u[-1]
else:
_parcur_cache['ue'] = ue
else:
_parcur_cache['u'] = zeros(m, float)
if not (1 <= k <= 5):
raise TypeError(f'1 <= k = {k} <= 5 must hold')
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
raise TypeError('Mismatch of input dimensions')
if s is None:
s = m - sqrt(2*m)
if t is None and task == -1:
raise TypeError('Knots must be given for task=-1')
if t is not None:
_parcur_cache['t'] = atleast_1d(t)
n = len(_parcur_cache['t'])
if task == -1 and n < 2*k + 2:
raise TypeError('There must be at least 2*k+2 knots for task=-1')
if m <= k:
raise TypeError('m > k must hold')
if nest is None:
nest = m + 2*k
if (task >= 0 and s == 0) or (nest < 0):
if per:
nest = m + 2*k
else:
nest = m + k + 1
nest = max(nest, 2*k + 3)
u = _parcur_cache['u']
ub = _parcur_cache['ub']
ue = _parcur_cache['ue']
t = _parcur_cache['t']
wrk = _parcur_cache['wrk']
iwrk = _parcur_cache['iwrk']
t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
task, ipar, s, t, nest, wrk, iwrk, per)
_parcur_cache['u'] = o['u']
_parcur_cache['ub'] = o['ub']
_parcur_cache['ue'] = o['ue']
_parcur_cache['t'] = t
_parcur_cache['wrk'] = o['wrk']
_parcur_cache['iwrk'] = o['iwrk']
ier = o['ier']
fp = o['fp']
n = len(t)
u = o['u']
c.shape = idim, n - k - 1
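# NB: unlike `splrep`, the coefficients are not zero-padded to len(t); each
# of the idim components has exactly n - k - 1 coefficients.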
tcku = [t, list(c), k], u
if ier <= 0 and not quiet:
warnings.warn(
RuntimeWarning(
_iermess[ier][0] + f"\tk={k} n={len(t)} m={m} fp={fp} s={s}"
),
stacklevel=2
)
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]), stacklevel=2)
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError as e:
raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
if full_output:
try:
return tcku, fp, ier, _iermess[ier][0]
except KeyError:
return tcku, fp, ier, _iermess['unknown'][0]
else:
return tcku
_curfit_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], dfitpack_int)}
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
# see the docstring of `_fitpack_py/splrep`
if task <= 0:
_curfit_cache = {}
x, y = map(atleast_1d, [x, y])
m = len(x)
if w is None:
w = ones(m, float)
if s is None:
s = 0.0
else:
w = atleast_1d(w)
if s is None:
s = m - sqrt(2*m)
if not len(w) == m:
raise TypeError(f'len(w)={len(w)} is not equal to m={m}')
if (m != len(y)) or (m != len(w)):
raise TypeError('Lengths of the first three arguments (x,y,w) must '
'be equal')
if not (1 <= k <= 5):
raise TypeError(
f'Given degree of the spline (k={k}) is not supported. (1<=k<=5)'
)
if m <= k:
raise TypeError('m > k must hold')
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if t is not None:
task = -1
if task == -1:
if t is None:
raise TypeError('Knots must be given for task=-1')
numknots = len(t)
_curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
_curfit_cache['t'][k+1:-k-1] = t
nest = len(_curfit_cache['t'])
elif task == 0:
if per:
nest = max(m + 2*k, 2*k + 3)
else:
nest = max(m + k + 1, 2*k + 3)
t = empty((nest,), float)
_curfit_cache['t'] = t
if task <= 0:
if per:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
else:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
_curfit_cache['iwrk'] = empty((nest,), dfitpack_int)
try:
t = _curfit_cache['t']
wrk = _curfit_cache['wrk']
iwrk = _curfit_cache['iwrk']
except KeyError as e:
raise TypeError("must call with task=1 only after"
" call with task=0,-1") from e
if not per:
n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
xb, xe, k, s)
else:
n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
tck = (t[:n], c[:n], k)
if ier <= 0 and not quiet:
_mess = (_iermess[ier][0] + f"\tk={k} n={len(t)} m={m} fp={fp} s={s}")
warnings.warn(RuntimeWarning(_mess), stacklevel=2)
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]), stacklevel=2)
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError as e:
raise _iermess['unknown'][1](_iermess['unknown'][0]) from e
if full_output:
try:
return tck, fp, ier, _iermess[ier][0]
except KeyError:
return tck, fp, ier, _iermess['unknown'][0]
else:
return tck
def splev(x, tck, der=0, ext=0):
# see the docstring of `_fitpack_py/splev`
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k, der=der:
splev(x, [t, c, k], der, ext), c))
else:
if not (0 <= der <= k):
raise ValueError(f"0<=der={der}<=k={k} must hold")
if ext not in (0, 1, 2, 3):
raise ValueError(f"ext = {ext} not in (0, 1, 2, 3) ")
x = asarray(x)
shape = x.shape
x = atleast_1d(x).ravel()
if der == 0:
y, ier = dfitpack.splev(t, c, k, x, ext)
else:
y, ier = dfitpack.splder(t, c, k, x, der, ext)
if ier == 10:
raise ValueError("Invalid input data")
if ier == 1:
raise ValueError("Found x value not in the domain")
if ier:
raise TypeError("An error occurred")
return y.reshape(shape)
def splint(a, b, tck, full_output=0):
# see the docstring of `_fitpack_py/splint`
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, a=a, b=b, t=t, k=k:
splint(a, b, [t, c, k]), c))
else:
aint, wrk = dfitpack.splint(t, c, k, a, b)
if full_output:
return aint, wrk
else:
return aint
def sproot(tck, mest=10):
# see the docstring of `_fitpack_py/sproot`
t, c, k = tck
if k != 3:
raise ValueError("sproot works only for cubic (k=3) splines")
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, t=t, k=k, mest=mest:
sproot([t, c, k], mest), c))
else:
if len(t) < 8:
raise TypeError(f"The number of knots {len(t)}>=8")
z, m, ier = dfitpack.sproot(t, c, mest)
if ier == 10:
raise TypeError("Invalid input data. "
"t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
if ier == 0:
return z[:m]
if ier == 1:
warnings.warn(RuntimeWarning("The number of zeros exceeds mest"),
stacklevel=2)
return z[:m]
raise TypeError("Unknown error")
def spalde(x, tck):
# see the docstring of `_fitpack_py/spalde`
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k:
spalde(x, [t, c, k]), c))
else:
x = atleast_1d(x)
if len(x) > 1:
return list(map(lambda x, tck=tck: spalde(x, tck), x))
d, ier = dfitpack.spalde(t, c, k+1, x[0])
if ier == 0:
return d
if ier == 10:
raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
raise TypeError("Unknown error")
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
# full_output=0,nest=None,per=0,quiet=1):
_surfit_cache = {'tx': array([], float), 'ty': array([], float),
'wrk': array([], float), 'iwrk': array([], dfitpack_int)}
def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
full_output=0, nxest=None, nyest=None, quiet=1):
"""
Find a bivariate B-spline representation of a surface.
Given a set of data points (x[i], y[i], z[i]) representing a surface
z=f(x,y), compute a B-spline representation of the surface. Based on
the routine SURFIT from FITPACK.
Parameters
----------
x, y, z : ndarray
Rank-1 arrays of data points.
w : ndarray, optional
Rank-1 array of weights. By default ``w=np.ones(len(x))``.
xb, xe : float, optional
End points of approximation interval in `x`.
By default ``xb = x.min(), xe=x.max()``.
yb, ye : float, optional
End points of approximation interval in `y`.
By default ``yb=y.min(), ye = y.max()``.
kx, ky : int, optional
The degrees of the spline (1 <= kx, ky <= 5).
Third order (kx=ky=3) is recommended.
task : int, optional
If task=0, find knots in x and y and coefficients for a given
smoothing factor, s.
If task=1, find knots and coefficients for another value of the
smoothing factor, s. bisplrep must have been previously called
with task=0 or task=1.
If task=-1, find coefficients for a given set of knots tx, ty.
s : float, optional
A non-negative smoothing factor. If weights correspond
to the inverse of the standard-deviation of the errors in z,
then a good s-value should be found in the range
``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
eps : float, optional
A threshold for determining the effective rank of an
over-determined linear system of equations (0 < eps < 1).
`eps` is not likely to need changing.
tx, ty : ndarray, optional
Rank-1 arrays of the knots of the spline for task=-1
full_output : int, optional
Non-zero to return optional outputs.
nxest, nyest : int, optional
Over-estimates of the total number of knots. If None then
``nxest = max(kx+sqrt(m/2),2*kx+3)``,
``nyest = max(ky+sqrt(m/2),2*ky+3)``.
quiet : int, optional
Non-zero to suppress printing of messages.
Returns
-------
tck : array_like
A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
coefficients (c) of the bivariate B-spline representation of the
surface along with the degree of the spline.
fp : ndarray
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about bisplrep success. Success is indicated if
ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplev` to evaluate the value of the B-spline given its tck
representation.
If the input data is such that input dimensions have incommensurate
units and differ by many orders of magnitude, the interpolant may have
numerical artifacts. Consider rescaling the data before interpolation.
References
----------
.. [1] Dierckx P.: An algorithm for surface fitting with spline functions,
IMA J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P.: An algorithm for surface fitting with spline functions,
report tw50, Dept. Computer Science, K.U. Leuven, 1980.
.. [3] Dierckx P.: Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
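A minimal additional sketch (not part of the upstream docstring): fit
scattered samples of ``z = x*y`` and evaluate the surface on a small grid
with `bisplev`:
>>> import numpy as np
>>> from scipy.interpolate import bisplrep, bisplev
>>> rng = np.random.default_rng(1234)
>>> x, y = rng.uniform(0, 1, size=(2, 100))
>>> tck = bisplrep(x, y, x * y, s=1e-8)
>>> zi = bisplev(np.linspace(0, 1, 5), np.linspace(0, 1, 5), tck)
>>> zi.shape
(5, 5)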
"""
x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
m = len(x)
if not (m == len(y) == len(z)):
raise TypeError('len(x)==len(y)==len(z) must hold.')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
if not len(w) == m:
raise TypeError(f'len(w)={len(w)} is not equal to m={m}')
if xb is None:
xb = x.min()
if xe is None:
xe = x.max()
if yb is None:
yb = y.min()
if ye is None:
ye = y.max()
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if s is None:
s = m - sqrt(2*m)
if tx is None and task == -1:
raise TypeError('Knots_x must be given for task=-1')
if tx is not None:
_surfit_cache['tx'] = atleast_1d(tx)
nx = len(_surfit_cache['tx'])
if ty is None and task == -1:
raise TypeError('Knots_y must be given for task=-1')
if ty is not None:
_surfit_cache['ty'] = atleast_1d(ty)
ny = len(_surfit_cache['ty'])
if task == -1 and nx < 2*kx+2:
raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
if task == -1 and ny < 2*ky+2:
raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
raise TypeError(
f'Given degree of the spline (kx,ky={kx},{ky}) is not supported. (1<=k<=5)'
)
if m < (kx + 1)*(ky + 1):
raise TypeError('m >= (kx+1)(ky+1) must hold')
if nxest is None:
nxest = int(kx + sqrt(m/2))
if nyest is None:
nyest = int(ky + sqrt(m/2))
nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
if task >= 0 and s == 0:
nxest = int(kx + sqrt(3*m))
nyest = int(ky + sqrt(3*m))
if task == -1:
_surfit_cache['tx'] = atleast_1d(tx)
_surfit_cache['ty'] = atleast_1d(ty)
tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
wrk = _surfit_cache['wrk']
u = nxest - kx - 1
v = nyest - ky - 1
km = max(kx, ky) + 1
ne = max(nxest, nyest)
bx, by = kx*v + ky + 1, ky*u + kx + 1
b1, b2 = bx, bx + v - ky
if bx > by:
b1, b2 = by, by + u - kx
msg = "Too many data points to interpolate"
lwrk1 = _int_overflow(u*v*(2 + b1 + b2) +
2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
OverflowError,
msg=msg)
lwrk2 = _int_overflow(u*v*(b2 + 1) + b2, OverflowError, msg=msg)
tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
task, s, eps, tx, ty, nxest, nyest,
wrk, lwrk1, lwrk2)
# store into the surfit cache (not the curfit one) so that a later call
# with task=1 can reuse these knots and workspace
_surfit_cache['tx'] = tx
_surfit_cache['ty'] = ty
_surfit_cache['wrk'] = o['wrk']
ier, fp = o['ier'], o['fp']
tck = [tx, ty, c, kx, ky]
ierm = min(11, max(-3, ier))
if ierm <= 0 and not quiet:
_mess = (
_iermess2[ierm][0] +
f"\tkx,ky={kx},{ky} nx,ny={len(tx)},{len(ty)} m={m} fp={fp} s={s}"
)
warnings.warn(RuntimeWarning(_mess), stacklevel=2)
if ierm > 0 and not full_output:
if ier in [1, 2, 3, 4, 5]:
_mess = (
f"\n\tkx,ky={kx},{ky} nx,ny={len(tx)},{len(ty)} m={m} fp={fp} s={s}"
)
warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess), stacklevel=2)
else:
try:
raise _iermess2[ierm][1](_iermess2[ierm][0])
except KeyError as e:
raise _iermess2['unknown'][1](_iermess2['unknown'][0]) from e
if full_output:
try:
return tck, fp, ier, _iermess2[ierm][0]
except KeyError:
return tck, fp, ier, _iermess2['unknown'][0]
else:
return tck
def bisplev(x, y, tck, dx=0, dy=0):
"""
Evaluate a bivariate B-spline and its derivatives.
Return a rank-2 array of spline function values (or spline derivative
values) at points given by the cross-product of the rank-1 arrays `x` and
`y`. In special cases, return an array or just a float if either `x` or
`y` or both are floats. Based on BISPEV and PARDER from FITPACK.
Parameters
----------
x, y : ndarray
Rank-1 arrays specifying the domain over which to evaluate the
spline or its derivative.
tck : tuple
A sequence of length 5 returned by `bisplrep` containing the knot
locations, the coefficients, and the degree of the spline:
[tx, ty, c, kx, ky].
dx, dy : int, optional
The orders of the partial derivatives in `x` and `y` respectively.
Returns
-------
vals : ndarray
The B-spline or its derivative evaluated over the set formed by
the cross-product of `x` and `y`.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplrep` to generate the `tck` representation.
References
----------
.. [1] Dierckx P.: An algorithm for surface fitting with spline functions,
IMA J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P.: An algorithm for surface fitting with spline functions,
report tw50, Dept. Computer Science, K.U. Leuven, 1980.
.. [3] Dierckx P.: Curve and surface fitting with splines,
Monographs on Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_2d_spline>`.
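A minimal additional sketch (not part of the upstream docstring): evaluate
a mixed partial derivative of a fitted surface; for ``z = x*y`` the exact
value of ``d^2 z / dx dy`` is 1 everywhere:
>>> import numpy as np
>>> from scipy.interpolate import bisplrep, bisplev
>>> rng = np.random.default_rng(5)
>>> x, y = rng.uniform(0, 1, size=(2, 100))
>>> tck = bisplrep(x, y, x * y, s=1e-9)
>>> bool(np.allclose(bisplev([0.3, 0.7], [0.4], tck, dx=1, dy=1), 1.0))
True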
"""
tx, ty, c, kx, ky = tck
if not (0 <= dx < kx):
raise ValueError(f"0 <= dx = {dx} < kx = {kx} must hold")
if not (0 <= dy < ky):
raise ValueError(f"0 <= dy = {dy} < ky = {ky} must hold")
x, y = map(atleast_1d, [x, y])
if (len(x.shape) != 1) or (len(y.shape) != 1):
raise ValueError("First two entries should be rank-1 arrays.")
msg = "Too many data points to interpolate."
_int_overflow(x.size * y.size, MemoryError, msg=msg)
if dx != 0 or dy != 0:
_int_overflow((tx.size - kx - 1)*(ty.size - ky - 1),
MemoryError, msg=msg)
z, ier = dfitpack.parder(tx, ty, c, kx, ky, dx, dy, x, y)
else:
z, ier = dfitpack.bispev(tx, ty, c, kx, ky, x, y)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
z.shape = len(x), len(y)
if len(z) > 1:
return z
if len(z[0]) > 1:
return z[0]
return z[0][0]
def dblint(xa, xb, ya, yb, tck):
"""Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
tck : list [tx, ty, c, kx, ky]
A sequence of length 5 returned by bisplrep containing the knot
locations tx, ty, the coefficients c, and the degrees kx, ky
of the spline.
Returns
-------
integ : float
The value of the resulting integral.
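Examples
--------
A hedged sketch (not part of the upstream docstring, using this module's
`bisplrep`): integrate a spline fit of ``z = x*y`` over the unit square;
the exact value is 1/4:
>>> import numpy as np
>>> xg, yg = np.meshgrid(np.linspace(0, 1, 8), np.linspace(0, 1, 8))
>>> tck = bisplrep(xg.ravel(), yg.ravel(), (xg * yg).ravel(), s=1e-9)
>>> bool(np.isclose(dblint(0, 1, 0, 1, tck), 0.25))
True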
"""
tx, ty, c, kx, ky = tck
return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
def insert(x, tck, m=1, per=0):
# see the docstring of `_fitpack_py/insert`
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
cc = []
for c_vals in c:
tt, cc_val, kk = insert(x, [t, c_vals, k], m)
cc.append(cc_val)
return (tt, cc, kk)
else:
tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
return (tt, cc, k)
def splder(tck, n=1):
# see the docstring of `_fitpack_py/splder`
if n < 0:
return splantider(tck, -n)
t, c, k = tck
if n > k:
raise ValueError(f"Order of derivative (n = {n!r}) must be <= "
f"order of spline (k = {tck[2]!r})")
# Extra axes for the trailing dims of the `c` array:
sh = (slice(None),) + ((None,)*len(c.shape[1:]))
with np.errstate(invalid='raise', divide='raise'):
try:
for j in range(n):
# See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
# Compute the denominator in the differentiation formula.
# (and append trailing dims, if necessary)
dt = t[k+1:-1] - t[1:-k-1]
dt = dt[sh]
# Compute the new coefficients
c = (c[1:-1-k] - c[:-2-k]) * k / dt
# Pad coefficient array to same size as knots (FITPACK
# convention)
c = np.r_[c, np.zeros((k,) + c.shape[1:])]
# Adjust knots
t = t[1:-1]
k -= 1
except FloatingPointError as e:
raise ValueError("The spline has internal repeated knots "
f"and is not differentiable {n} times") from e
return t, c, k
def splantider(tck, n=1):
# see the docstring of `_fitpack_py/splantider`
if n < 0:
return splder(tck, -n)
t, c, k = tck
# Extra axes for the trailing dims of the `c` array:
sh = (slice(None),) + (None,)*len(c.shape[1:])
for j in range(n):
# This is the inverse set of operations to splder.
# Compute the multiplier in the antiderivative formula.
dt = t[k+1:] - t[:-k-1]
dt = dt[sh]
# Compute the new coefficients
c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
c = np.r_[np.zeros((1,) + c.shape[1:]),
c,
[c[-1]] * (k+2)]
# New knots
t = np.r_[t[0], t, t[-1]]
k += 1
return t, c, k


@ -0,0 +1,898 @@
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import numpy as np
# These are in the API for fitpack even if not used in fitpack.py itself.
from ._fitpack_impl import bisplrep, bisplev, dblint # noqa: F401
from . import _fitpack_impl as _impl
from ._bsplines import BSpline
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-D curve.
.. legacy:: function
Specifically, we recommend using `make_splprep` in new code.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : array, optional
The knots needed for ``task=-1``.
There must be at least ``2*k+2`` knots.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m/2.
A value of ``nest=m+k+1`` is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
Returns
-------
tck : tuple
A tuple, ``(t,c,k)`` containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
BSpline
make_interp_spline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
The number of coefficients in the `c` array is ``k+1`` less than the number
of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
the array of coefficients to have the same length as the array of knots.
These additional coefficients are ignored by evaluation routines, `splev`
and `BSpline`.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Generate a discretization of a limacon curve in the polar coordinates:
>>> import numpy as np
>>> phi = np.linspace(0, 2.*np.pi, 40)
>>> r = 0.5 + np.cos(phi) # polar coords
>>> x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian
And interpolate:
>>> from scipy.interpolate import splprep, splev
>>> tck, u = splprep([x, y], s=0)
>>> new_points = splev(u, tck)
Notice that (i) we force interpolation by using ``s=0``,
(ii) the parameterization, ``u``, is generated automatically.
Now plot the result:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x, y, 'ro')
>>> ax.plot(new_points[0], new_points[1], 'r-')
>>> plt.show()
"""
res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
quiet)
return res
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of a 1-D curve.
.. legacy:: function
Specifically, we recommend using `make_splrep` in new code.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve ``y = f(x)``.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x` and `y`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `y` values have standard-deviation given by the
vector ``d``, then `w` should be ``1/d``. Default is ``ones(len(x))``.
xb, xe : float, optional
The interval to fit. If None, these default to ``x[0]`` and ``x[-1]``
respectively.
k : int, optional
The degree of the spline fit. It is recommended to use cubic splines.
Even values of `k` should be avoided especially with small `s` values.
``1 <= k <= 5``.
task : {1, 0, -1}, optional
If ``task==0``, find ``t`` and ``c`` for a given smoothing factor, `s`.
If ``task==1`` find ``t`` and ``c`` for another value of the smoothing factor,
`s`. There must have been a previous call with ``task=0`` or ``task=1`` for
the same set of data (``t`` will be stored and used internally)
If ``task=-1`` find the weighted least square spline for a given set of
knots, ``t``. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s`` where ``g(x)``
is the smoothed interpolation of ``(x,y)``. The user can use `s` to control
the tradeoff between closeness and smoothness of fit. Larger `s` means
more smoothing while smaller values of `s` indicate less smoothing.
Recommended values of `s` depend on the weights, `w`. If the weights
represent the inverse of the standard-deviation of `y`, then a good `s`
value should be found in the range ``(m-sqrt(2*m),m+sqrt(2*m))`` where ``m`` is
the number of datapoints in `x`, `y`, and `w`. default : ``s=m-sqrt(2*m)`` if
weights are supplied. ``s = 0.0`` (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for ``task=-1``. If given then task is automatically set
to ``-1``.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period ``x[m-1]`` -
``x[0]`` and a smooth periodic spline approximation is returned. Values of
``y[m-1]`` and ``w[m-1]`` are not used.
The default is zero, corresponding to boundary condition 'not-a-knot'.
quiet : bool, optional
Non-zero to suppress messages.
Returns
-------
tck : tuple
A tuple ``(t,c,k)`` containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ``ier<=0``.
If ``ier in [1,2,3]``, an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, `ier`.
See Also
--------
UnivariateSpline, BivariateSpline
splprep, splev, sproot, spalde, splint
bisplrep, bisplev
BSpline
make_interp_spline
Notes
-----
See `splev` for evaluation of the spline and its derivatives. Uses the
FORTRAN routine ``curfit`` from FITPACK.
The user is responsible for assuring that the values of `x` are unique.
Otherwise, `splrep` will not return sensible results.
If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
This routine zero-pads the coefficients array ``c`` to have the same length
as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
`splprep`, which does not zero-pad the coefficients.
The default boundary condition is 'not-a-knot', i.e. the first and second
segment at a curve end are the same polynomial. More boundary conditions are
available in `CubicSpline`.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
You can interpolate 1-D points with a B-spline curve.
Further examples are given in
:ref:`in the tutorial <tutorial-interpolate_splXXX>`.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> spl = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, spl)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
return res
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
.. legacy:: function
Specifically, we recommend constructing a `BSpline` object and using
its ``__call__`` method.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : BSpline instance or tuple
If a tuple, then it should be a sequence of length 3 returned by
`splrep` or `splprep` containing the knots, coefficients, and degree
of the spline. (Also see Notes.)
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k, the degree of the spline).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in `x`. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in an N-D space.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
BSpline
Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using `BSpline` objects.
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
A comparison between `splev`, `splder` and `spalde` to compute the derivatives of a
B-spline can be found in the `spalde` examples section.
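A minimal sketch (not from the upstream docstring): evaluate a spline and
its first derivative:
>>> import numpy as np
>>> from scipy.interpolate import splrep, splev
>>> x = np.linspace(0, 3, 20)
>>> tck = splrep(x, x**2, s=0)
>>> bool(np.allclose(splev([1.0, 2.0], tck), [1.0, 4.0]))
True
>>> bool(np.allclose(splev([1.0, 2.0], tck, der=1), [2.0, 4.0]))
True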
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
"not allowed. Use BSpline.__call__(x) instead.")
raise ValueError(mesg)
# remap the out-of-bounds behavior
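# (only ext=0 has a BSpline equivalent, extrapolate=True; the remaining
# modes fall through to the ValueError below)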
try:
extrapolate = {0: True, }[ext]
except KeyError as e:
raise ValueError(f"Extrapolation mode {ext} is not supported "
"by BSpline.") from e
return tck(x, der, extrapolate=extrapolate)
else:
return _impl.splev(x, tck, der, ext)
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline between two given points.
.. legacy:: function
Specifically, we recommend constructing a `BSpline` object and using its
``integrate`` method.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple or a BSpline instance
If a tuple, then it should be a sequence of length 3, containing the
vector of knots, the B-spline coefficients, and the degree of the
spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
(Only returned if `full_output` is non-zero)
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
BSpline
Notes
-----
`splint` silently assumes that the spline function is zero outside the data
interval (`a`, `b`).
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
References
----------
.. [1] P.W. Gaffney, The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Examples are given :ref:`in the tutorial <tutorial-interpolate_splXXX>`.
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
"not allowed. Use BSpline.integrate() instead.")
raise ValueError(mesg)
if full_output != 0:
mesg = (f"full_output = {full_output} is not supported. Proceeding as if "
"full_output = 0")
return tck.integrate(a, b, extrapolate=False)
else:
return _impl.splint(a, b, tck, full_output)
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
.. legacy:: function
Specifically, we recommend constructing a `BSpline` object and using the
following pattern: `PPoly.from_spline(spl).roots()`.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple or a BSpline object
If a tuple, then it should be a sequence of length 3, containing the
vector of knots, the B-spline coefficients, and the degree of the
spline.
The number of knots must be >= 8, and the degree must be 3.
The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
See Also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
BSpline
Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
Examples
--------
For some data, this method may miss a root. This happens when one of
the spline knots (which FITPACK places automatically) happens to
coincide with the true root. A workaround is to convert to `PPoly`,
which uses a different root-finding algorithm.
For example,
>>> x = [1.96, 1.97, 1.98, 1.99, 2.00, 2.01, 2.02, 2.03, 2.04, 2.05]
>>> y = [-6.365470e-03, -4.790580e-03, -3.204320e-03, -1.607270e-03,
... 4.440892e-16, 1.616930e-03, 3.243000e-03, 4.877670e-03,
... 6.520430e-03, 8.170770e-03]
>>> from scipy.interpolate import splrep, sproot, PPoly
>>> tck = splrep(x, y, s=0)
>>> sproot(tck)
array([], dtype=float64)
Converting to a PPoly object does find the roots at ``x=2``:
>>> ppoly = PPoly.from_spline(tck)
>>> ppoly.roots(extrapolate=False)
array([2.])
Further examples are given :ref:`in the tutorial
<tutorial-interpolate_splXXX>`.
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
"not allowed.")
raise ValueError(mesg)
t, c, k = tck.tck
# _impl.sproot expects the interpolation axis to be last, so roll it.
# NB: This transpose is a no-op if c is 1D.
sh = tuple(range(c.ndim))
c = c.transpose(sh[1:] + (0,))
return _impl.sproot((t, c, k), mest)
else:
return _impl.sproot(tck, mest)
def spalde(x, tck):
"""
Evaluate a B-spline and all its derivatives at one point (or set of points) up
to order k (the degree of the spline); order 0 is the spline itself.
.. legacy:: function
Specifically, we recommend constructing a `BSpline` object and evaluate
its derivative in a loop or a list comprehension.
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline whose
derivatives to compute.
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`; the first element is the
spline itself.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
UnivariateSpline, BivariateSpline
References
----------
.. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
To calculate the derivatives of a B-spline there are several approaches.
In this example, we will demonstrate that `spalde` is equivalent to
calling `splev` and `splder`.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import BSpline, spalde, splder, splev
>>> # Store characteristic parameters of a B-spline
>>> tck = ((-2, -2, -2, -2, -1, 0, 1, 2, 2, 2, 2), # knots
... (0, 0, 0, 6, 0, 0, 0), # coefficients
... 3) # degree (cubic)
>>> # Instance a B-spline object
>>> # `BSpline` objects are preferred, except for spalde()
>>> bspl = BSpline(tck[0], tck[1], tck[2])
>>> # Generate extra points to get a smooth curve
>>> x = np.linspace(min(tck[0]), max(tck[0]), 100)
Evaluate the curve and all derivatives
>>> # The order of derivative must be less than or equal to k, the degree of the spline
>>> # Method 1: spalde()
>>> f1_y_bsplin = [spalde(i, tck)[0] for i in x ] # The B-spline itself
>>> f1_y_deriv1 = [spalde(i, tck)[1] for i in x ] # 1st derivative
>>> f1_y_deriv2 = [spalde(i, tck)[2] for i in x ] # 2nd derivative
>>> f1_y_deriv3 = [spalde(i, tck)[3] for i in x ] # 3rd derivative
>>> # You can reach the same result by using `splev` and `splder`
>>> f2_y_deriv3 = splev(x, bspl, der=3)
>>> f3_y_deriv3 = splder(bspl, n=3)(x)
>>> # Generate a figure with three axes for graphic comparison
>>> fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16, 5))
>>> suptitle = fig.suptitle('Evaluate a B-spline and all derivatives')
>>> # Plot B-spline and all derivatives using the three methods
>>> orders = range(4)
>>> linetypes = ['-', '--', '-.', ':']
>>> labels = ['B-Spline', '1st deriv.', '2nd deriv.', '3rd deriv.']
>>> functions = ['splev()', 'splder()', 'spalde()']
>>> for order, linetype, label in zip(orders, linetypes, labels):
... ax1.plot(x, splev(x, bspl, der=order), linetype, label=label)
... ax2.plot(x, splder(bspl, n=order)(x), linetype, label=label)
... ax3.plot(x, [spalde(i, tck)[order] for i in x], linetype, label=label)
>>> for ax, function in zip((ax1, ax2, ax3), functions):
... ax.set_title(function)
... ax.legend()
>>> plt.tight_layout()
>>> plt.show()
"""
if isinstance(tck, BSpline):
raise TypeError("spalde does not accept BSpline instances.")
else:
return _impl.spalde(x, tck)
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
.. legacy:: function
Specifically, we recommend constructing a `BSpline` object and using
its ``insert_knot`` method.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : float
A knot value at which to insert a new knot. If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : a `BSpline` instance or a tuple
If tuple, then it is expected to be a tuple (t,c,k) containing
the vector of knots, the B-spline coefficients, and the degree of
the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
BSpline instance or a tuple
A new B-spline with knots t, coefficients c, and degree k.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
Notes
-----
Based on algorithms from [1]_ and [2]_.
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects, in particular `BSpline.insert_knot`
method.
See Also
--------
BSpline.insert_knot
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
Examples
--------
You can insert knots into a B-spline.
>>> from scipy.interpolate import splrep, insert
>>> import numpy as np
>>> x = np.linspace(0, 10, 5)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> tck[0]
array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.])
A knot is inserted:
>>> tck_inserted = insert(3, tck)
>>> tck_inserted[0]
array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.])
Some knots are inserted:
>>> tck_inserted2 = insert(8, tck, m=3)
>>> tck_inserted2[0]
array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
# FITPACK expects the interpolation axis to be last, so roll it over
# NB: if c array is 1D, transposes are no-ops
sh = tuple(range(c.ndim))
c = c.transpose(sh[1:] + (0,))
t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)
# and roll the last axis back
c_ = np.asarray(c_)
c_ = c_.transpose((sh[-1],) + sh[:-1])
return BSpline(t_, c_, k_)
else:
return _impl.insert(x, tck, m, per)
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
.. legacy:: function
Specifically, we recommend constructing a `BSpline` object and using its
``derivative`` method.
Parameters
----------
tck : BSpline instance or tuple
BSpline instance or a tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline whose
derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
`BSpline` instance or tuple
Spline of order k2=k-n representing the derivative
of the input spline.
A tuple is returned if the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
See Also
--------
splantider, splev, spalde
BSpline
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> import numpy as np
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
A comparison between `splev`, `splder` and `spalde` to compute the derivatives of a
B-spline can be found in the `spalde` examples section.
"""
if isinstance(tck, BSpline):
return tck.derivative(n)
else:
return _impl.splder(tck, n)
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
.. legacy:: function
Specifically, we recommend constructing a `BSpline` object and using its
``antiderivative`` method.
Parameters
----------
tck : BSpline instance or a tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
BSpline instance or a tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
See Also
--------
splder, splev, spalde
BSpline
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> import numpy as np
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if isinstance(tck, BSpline):
return tck.antiderivative(n)
else:
return _impl.splantider(tck, n)


@ -0,0 +1,996 @@
""" Replicate FITPACK's logic for constructing smoothing spline functions and curves.
Currently provides analogs of the splrep and splprep Python routines, i.e. the
curfit.f and parcur.f routines (the drivers are fpcurf.f and fppara.f, respectively).
The Fortran sources are from
https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
:doi:`10.1016/0146-664X(82)90043-0`.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
.. [3] P. Dierckx, "An algorithm for smoothing, differentiation and integration
of experimental data using spline functions",
Journal of Computational and Applied Mathematics, vol. I, no 3, p. 165 (1975).
https://doi.org/10.1016/0771-050X(75)90034-0
"""
import warnings
import operator
import numpy as np
from ._bsplines import (
_not_a_knot, make_interp_spline, BSpline, fpcheck, _lsq_solve_qr
)
from . import _dierckx # type: ignore[attr-defined]
# cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
# c part 1: determination of the number of knots and their position c
# c ************************************************************** c
#
# https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fpcurf.f#L31
# Hardcoded in curfit.f
TOL = 0.001
MAXIT = 20
def _get_residuals(x, y, t, k, w):
# FITPACK has (w*(spl(x)-y))**2; make_lsq_spline has w*(spl(x)-y)**2
w2 = w**2
# inline the relevant part of
# >>> spl = make_lsq_spline(x, y, w=w2, t=t, k=k)
# NB:
# 1. y is assumed to be 2D here. For 1D case (parametric=False),
# the call must have been preceded by y = y[:, None] (cf _validate_inputs)
# 2. We always sum the squares across axis=1:
# * For 1D (parametric=False), the last dimension has size one,
# so the summation is a no-op.
# * For 2D (parametric=True), the summation is actually how the
# 'residuals' are defined, see Eq. (42) in Dierckx1982
# (the reference is in the docstring of `class F`) below.
_, _, c = _lsq_solve_qr(x, y, t, k, w)
c = np.ascontiguousarray(c)
spl = BSpline(t, c, k)
residuals = _compute_residuals(w2, spl(x), y)
fp = residuals.sum()
if np.isnan(fp):
raise ValueError(_iermesg[1])
return residuals, fp
def _compute_residuals(w2, splx, y):
delta = ((splx - y)**2).sum(axis=1)
return w2 * delta
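# (For 1D data promoted to shape (m, 1), the axis=1 sum above is a no-op and
# the residual vector reduces to w**2 * (spl(x) - y)**2, i.e. FITPACK's
# (w * (spl(x) - y))**2 convention.)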
def add_knot(x, t, k, residuals):
"""Add a new knot.
(Approximately) replicate FITPACK's logic:
1. split the `x` array into knot intervals, ``t(j+k) <= x(i) <= t(j+k+1)``
2. find the interval with the maximum sum of residuals
3. insert a new knot into the middle of that interval.
NB: a new knot is in fact an `x` value at the middle of the interval.
So *the knots are a subset of `x`*.
This routine is an analog of
https://github.com/scipy/scipy/blob/v1.11.4/scipy/interpolate/fitpack/fpcurf.f#L190-L215
(cf _split function)
and https://github.com/scipy/scipy/blob/v1.11.4/scipy/interpolate/fitpack/fpknot.f
"""
new_knot = _dierckx.fpknot(x, t, k, residuals)
idx_t = np.searchsorted(t, new_knot)
t_new = np.r_[t[:idx_t], new_knot, t[idx_t:]]
return t_new
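# A hedged illustration (hypothetical numbers): for k=3, a knot vector
# t = [0, 0, 0, 0, 4, 4, 4, 4] over x = [0, 1, 2, 3, 4] has a single knot
# interval; fpknot then picks an `x` value in the middle of the interval
# with the largest residual sum, so add_knot returns `t` with one new
# interior knot, e.g. 2.0.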
def _validate_inputs(x, y, w, k, s, xb, xe, parametric):
"""Common input validations for generate_knots and make_splrep.
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if w is None:
w = np.ones_like(x, dtype=float)
else:
w = np.asarray(w, dtype=float)
if w.ndim != 1:
raise ValueError(f"{w.ndim = } not implemented yet.")
if (w < 0).any():
raise ValueError("Weights must be non-negative")
if y.ndim == 0 or y.ndim > 2:
raise ValueError(f"{y.ndim = } not supported (must be 1 or 2.)")
parametric = bool(parametric)
if parametric:
if y.ndim != 2:
raise ValueError(f"{y.ndim = } != 2 not supported with {parametric =}.")
else:
if y.ndim != 1:
raise ValueError(f"{y.ndim = } != 1 not supported with {parametric =}.")
# all _impl functions expect y.ndim = 2
y = y[:, None]
if w.shape[0] != x.shape[0]:
raise ValueError(f"Weights is incompatible: {w.shape =} != {x.shape}.")
if x.shape[0] != y.shape[0]:
raise ValueError(f"Data is incompatible: {x.shape = } and {y.shape = }.")
if x.ndim != 1 or (x[1:] < x[:-1]).any():
raise ValueError("Expect `x` to be an ordered 1D sequence.")
k = operator.index(k)
if s < 0:
raise ValueError(f"`s` must be non-negative. Got {s = }")
if xb is None:
xb = min(x)
if xe is None:
xe = max(x)
return x, y, w, k, s, xb, xe
def generate_knots(x, y, *, w=None, xb=None, xe=None, k=3, s=0, nest=None):
"""Generate knot vectors until the Least SQuares (LSQ) criterion is satified.
Parameters
----------
x, y : array_like
The data points defining the curve ``y = f(x)``.
w : array_like, optional
Weights.
xb : float, optional
The boundary of the approximation interval. If None (default),
is set to ``x[0]``.
xe : float, optional
The boundary of the approximation interval. If None (default),
is set to ``x[-1]``.
k : int, optional
The spline degree. Default is cubic, ``k = 3``.
s : float, optional
The smoothing factor. Default is ``s = 0``.
nest : int, optional
Stop when at least this many knots are placed.
Yields
------
t : ndarray
Knot vectors with an increasing number of knots.
        The generator is finite: it stops either when the smoothing criterion
        is satisfied, or when the number of knots exceeds the maximum value:
        the user-provided `nest` or ``x.size + k + 1``, the number of knots
        of the interpolating spline.
Examples
--------
Generate some noisy data and fit a sequence of LSQ splines:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import make_lsq_spline, generate_knots
>>> rng = np.random.default_rng(12345)
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * rng.standard_normal(size=50)
>>> knots = list(generate_knots(x, y, s=1e-10))
>>> for t in knots[::3]:
... spl = make_lsq_spline(x, y, t)
    ...     xs = np.linspace(-3, 3, 201)
... plt.plot(xs, spl(xs), '-', label=f'n = {len(t)}', lw=3, alpha=0.7)
>>> plt.plot(x, y, 'o', label='data')
>>> plt.plot(xs, np.exp(-xs**2), '--')
>>> plt.legend()
    Note that increasing the number of knots makes the result follow the data
    more and more closely.
Also note that a step of the generator may add multiple knots:
>>> [len(t) for t in knots]
[8, 9, 10, 12, 16, 24, 40, 48, 52, 54]
Notes
-----
    The routine generates successive knot vectors of increasing length, starting
    from ``2*(k+1)`` up to ``len(x) + k + 1``, trying to make knots more dense
in the regions where the deviation of the LSQ spline from data is large.
When the maximum number of knots, ``len(x) + k + 1`` is reached
(this happens when ``s`` is small and ``nest`` is large), the generator
stops, and the last output is the knots for the interpolation with the
not-a-knot boundary condition.
Knots are located at data sites, unless ``k`` is even and the number of knots
is ``len(x) + k + 1``. In that case, the last output of the generator
has internal knots at Greville sites, ``(x[1:] + x[:-1]) / 2``.
.. versionadded:: 1.15.0
"""
if s == 0:
if nest is not None or w is not None:
raise ValueError("s == 0 is interpolation only")
t = _not_a_knot(x, k)
yield t
return
x, y, w, k, s, xb, xe = _validate_inputs(
x, y, w, k, s, xb, xe, parametric=np.ndim(y) == 2
)
yield from _generate_knots_impl(x, y, w=w, xb=xb, xe=xe, k=k, s=s, nest=nest)
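# Illustrative usage sketch (hypothetical data): the `nest` argument caps the
# generator, which stops as soon as at least `nest` knots are placed.
def _demo_generate_knots_nest():
    import numpy as np

    rng = np.random.default_rng(0)
    x = np.linspace(0.0, 1.0, 40)
    y = np.sin(8.0 * x) + 0.05 * rng.standard_normal(40)
    *_, t_last = generate_knots(x, y, s=1e-8, nest=15)
    return t_last    # may be slightly longer than `nest`, see the docstring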
def _generate_knots_impl(x, y, *, w=None, xb=None, xe=None, k=3, s=0, nest=None):
acc = s * TOL
m = x.size # the number of data points
if nest is None:
# the max number of knots. This is set in _fitpack_impl.py line 274
# and fitpack.pyf line 198
nest = max(m + k + 1, 2*k + 3)
else:
if nest < 2*(k + 1):
raise ValueError(f"`nest` too small: {nest = } < 2*(k+1) = {2*(k+1)}.")
nmin = 2*(k + 1) # the number of knots for an LSQ polynomial approximation
nmax = m + k + 1 # the number of knots for the spline interpolation
# start from no internal knots
t = np.asarray([xb]*(k+1) + [xe]*(k+1), dtype=float)
n = t.shape[0]
fp = 0.0
fpold = 0.0
# c main loop for the different sets of knots. m is a safe upper bound
# c for the number of trials.
for _ in range(m):
yield t
# construct the LSQ spline with this set of knots
fpold = fp
residuals, fp = _get_residuals(x, y, t, k, w=w)
fpms = fp - s
# c test whether the approximation sinf(x) is an acceptable solution.
# c if f(p=inf) < s accept the choice of knots.
if (abs(fpms) < acc) or (fpms < 0):
return
# ### c increase the number of knots. ###
# c determine the number of knots nplus we are going to add.
if n == nmin:
# the first iteration
nplus = 1
else:
delta = fpold - fp
npl1 = int(nplus * fpms / delta) if delta > acc else nplus*2
nplus = min(nplus*2, max(npl1, nplus//2, 1))
# actually add knots
for j in range(nplus):
t = add_knot(x, t, k, residuals)
# check if we have enough knots already
n = t.shape[0]
# c if n = nmax, sinf(x) is an interpolating spline.
# c if n=nmax we locate the knots as for interpolation.
if n >= nmax:
t = _not_a_knot(x, k)
yield t
return
# c if n=nest we cannot increase the number of knots because of
# c the storage capacity limitation.
if n >= nest:
yield t
return
# recompute if needed
if j < nplus - 1:
residuals, _ = _get_residuals(x, y, t, k, w=w)
# this should never be reached
return
# cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
# c part 2: determination of the smoothing spline sp(x). c
# c *************************************************** c
# c we have determined the number of knots and their position. c
# c we now compute the b-spline coefficients of the smoothing spline c
# c sp(x). the observation matrix a is extended by the rows of matrix c
# c b expressing that the kth derivative discontinuities of sp(x) at c
# c the interior knots t(k+2),...t(n-k-1) must be zero. the corres- c
# c ponding weights of these additional rows are set to 1/p. c
# c iteratively we then have to determine the value of p such that c
# c f(p)=sum((w(i)*(y(i)-sp(x(i))))**2) be = s. we already know that c
# c the least-squares kth degree polynomial corresponds to p=0, and c
# c that the least-squares spline corresponds to p=infinity. the c
# c iteration process which is proposed here, makes use of rational c
# c interpolation. since f(p) is a convex and strictly decreasing c
# c function of p, it can be approximated by a rational function c
# c r(p) = (u*p+v)/(p+w). three values of p(p1,p2,p3) with correspond- c
# c ing values of f(p) (f1=f(p1)-s,f2=f(p2)-s,f3=f(p3)-s) are used c
# c to calculate the new value of p such that r(p)=s. convergence is c
# c guaranteed by taking f1>0 and f3<0. c
# cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
def prodd(t, i, j, k):
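    # Product of (t[j] - t[i+s]) over the k+2 knots t[i], ..., t[i+k+1],
    # skipping the zero factor at i + s == j: the denominator of the
    # divided-difference expression behind the k-th derivative jumps in `disc`.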
res = 1.0
for s in range(k+2):
if i + s != j:
res *= (t[j] - t[i+s])
return res
def disc(t, k):
"""Discontinuity matrix: jumps of k-th derivatives of b-splines at internal knots.
See Eqs. (9)-(10) of Ref. [1], or, equivalently, Eq. (3.43) of Ref. [2].
This routine assumes internal knots are all simple (have multiplicity =1).
Parameters
----------
t : ndarray, 1D, shape(n,)
Knots.
k : int
The spline degree
Returns
-------
disc : ndarray, shape(n-2*k-1, k+2)
The jumps of the k-th derivatives of b-splines at internal knots,
``t[k+1], ...., t[n-k-1]``.
    offset : ndarray, shape (n-2*k-1,)
        Offsets: row ``j`` of `disc` starts at column ``offset[j]`` of the
        dense matrix it packs.
    nc : int
        The number of b-spline coefficients, ``n - k - 1``.
Notes
-----
The normalization here follows FITPACK:
(https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fpdisc.f#L36)
The k-th derivative jumps are multiplied by a factor::
(delta / nrint)**k / k!
where ``delta`` is the length of the interval spanned by internal knots, and
``nrint`` is one less the number of internal knots (i.e., the number of
subintervals between them).
References
----------
.. [1] Paul Dierckx, Algorithms for smoothing data with periodic and parametric
splines, Computer Graphics and Image Processing, vol. 20, p. 171 (1982).
:doi:`10.1016/0146-664X(82)90043-0`
.. [2] Tom Lyche and Knut Morken, Spline methods,
http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/
"""
n = t.shape[0]
    # the length of the base interval spanned by internal knots & the number
    # of subintervals between these internal knots
delta = t[n - k - 1] - t[k]
nrint = n - 2*k - 1
matr = np.empty((nrint - 1, k + 2), dtype=float)
for jj in range(nrint - 1):
j = jj + k + 1
for ii in range(k + 2):
i = jj + ii
matr[jj, ii] = (t[i + k + 1] - t[i]) / prodd(t, i, j, k)
# NB: equivalent to
# row = [(t[i + k + 1] - t[i]) / prodd(t, i, j, k) for i in range(j-k-1, j+1)]
# assert (matr[j-k-1, :] == row).all()
# follow FITPACK
    matr *= (delta / nrint)**k
# make it packed
offset = np.array([i for i in range(nrint-1)], dtype=np.int64)
nc = n - k - 1
return matr, offset, nc
class F:
""" The r.h.s. of ``f(p) = s``.
    Given scalar `p`, we solve the system of equations in the LSQ sense:

        | A     |           | y |
        | B / p |  @  c  =  | 0 |

    where `A` is the matrix of b-splines and `B` is the discontinuity matrix
    (the jumps of the k-th derivatives of b-spline basis elements at knots).
Since we do that repeatedly while minimizing over `p`, we QR-factorize
`A` only once and update the QR factorization only of the `B` rows of the
augmented matrix |A, B/p|.
    The system of equations is Eq. (15) of Ref. [1]_; the strategy and
    implementation follow that of FITPACK, see specific links below.
References
----------
[1] P. Dierckx, Algorithms for Smoothing Data with Periodic and Parametric Splines,
COMPUTER GRAPHICS AND IMAGE PROCESSING vol. 20, pp 171-184 (1982.)
https://doi.org/10.1016/0146-664X(82)90043-0
"""
def __init__(self, x, y, t, k, s, w=None, *, R=None, Y=None):
self.x = x
self.y = y
self.t = t
self.k = k
w = np.ones_like(x, dtype=float) if w is None else w
if w.ndim != 1:
raise ValueError(f"{w.ndim = } != 1.")
self.w = w
self.s = s
if y.ndim != 2:
raise ValueError(f"F: expected y.ndim == 2, got {y.ndim = } instead.")
# ### precompute what we can ###
# https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fpcurf.f#L250
# c evaluate the discontinuity jump of the kth derivative of the
# c b-splines at the knots t(l),l=k+2,...n-k-1 and store in b.
b, b_offset, b_nc = disc(t, k)
# the QR factorization of the data matrix, if not provided
# NB: otherwise, must be consistent with x,y & s, but this is not checked
if R is None and Y is None:
R, Y, _ = _lsq_solve_qr(x, y, t, k, w)
# prepare to combine R and the discontinuity matrix (AB); also r.h.s. (YY)
# https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fpcurf.f#L269
# c the rows of matrix b with weight 1/p are rotated into the
# c triangularised observation matrix a which is stored in g.
nc = t.shape[0] - k - 1
nz = k + 1
if R.shape[1] != nz:
raise ValueError(f"Internal error: {R.shape[1] =} != {k+1 =}.")
# r.h.s. of the augmented system
z = np.zeros((b.shape[0], Y.shape[1]), dtype=float)
self.YY = np.r_[Y[:nc], z]
# l.h.s. of the augmented system
AA = np.zeros((nc + b.shape[0], self.k+2), dtype=float)
AA[:nc, :nz] = R[:nc, :]
        # AA[nc:, :] = b / p   # done in __call__(self, p)
self.AA = AA
self.offset = np.r_[np.arange(nc, dtype=np.int64), b_offset]
self.nc = nc
self.b = b
def __call__(self, p):
# https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fpcurf.f#L279
# c the row of matrix b is rotated into triangle by givens transformation
        # copy the precomputed matrices over for in-place work
AB = self.AA.copy()
offset = self.offset.copy()
nc = self.nc
AB[nc:, :] = self.b / p
QY = self.YY.copy()
# heavy lifting happens here, in-place
_dierckx.qr_reduce(AB, offset, nc, QY, startrow=nc)
# solve for the coefficients
c = _dierckx.fpback(AB, nc, QY)
spl = BSpline(self.t, c, self.k)
residuals = _compute_residuals(self.w**2, spl(self.x), self.y)
fp = residuals.sum()
self.spl = spl # store it
return fp - self.s
def fprati(p1, f1, p2, f2, p3, f3):
"""The root of r(p) = (u*p + v) / (p + w) given three points and values,
(p1, f2), (p2, f2) and (p3, f3).
The FITPACK analog adjusts the bounds, and we do not
https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fprati.f
NB: FITPACK uses p < 0 to encode p=infinity. We just use the infinity itself.
Since the bracket is ``p1 <= p2 <= p3``, ``p3`` can be infinite (in fact,
this is what the minimizer starts with, ``p3=inf``).
"""
h1 = f1 * (f2 - f3)
h2 = f2 * (f3 - f1)
h3 = f3 * (f1 - f2)
if p3 == np.inf:
return -(p2*h1 + p1*h2) / h3
return -(p1*p2*h3 + p2*p3*h1 + p1*p3*h2) / (p1*h1 + p2*h2 + p3*h3)
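# A small self-check sketch (hypothetical): when the three points are sampled
# from an exactly rational f(p) = (u*p + v) / (p + w), `fprati` recovers the
# root -v/u exactly.
def _demo_fprati():
    u, v, w = -1.0, 3.0, 2.0

    def r(p):
        return (u*p + v) / (p + w)

    p1, p2, p3 = 0.5, 1.0, 4.0
    # r(p1) > 0 > r(p3), as the minimizer requires; the result is -v/u == 3.0
    return fprati(p1, r(p1), p2, r(p2), p3, r(p3))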
class Bunch:
def __init__(self, **kwargs):
self.__dict__.update(**kwargs)
_iermesg1 = """error. a theoretically impossible result was found during
the iteration process for finding a smoothing spline with
fp = s. probably causes : s too small.
"""
_iermesg = {
1: _iermesg1 + """the weighted sum of squared residuals is becoming NaN
""",
2: _iermesg1 + """there is an approximation returned but the corresponding
weighted sum of squared residuals does not satisfy the
condition abs(fp-s)/s < tol.
""",
3: """error. the maximal number of iterations maxit (set to 20
by the program) allowed for finding a smoothing spline
with fp=s has been reached. probably causes : s too small
there is an approximation returned but the corresponding
weighted sum of squared residuals does not satisfy the
condition abs(fp-s)/s < tol.
"""
}
def root_rati(f, p0, bracket, acc):
"""Solve `f(p) = 0` using a rational function approximation.
In a nutshell, since the function f(p) is known to be monotonically decreasing, we
- maintain the bracket (p1, f1), (p2, f2) and (p3, f3)
- at each iteration step, approximate f(p) by a rational function
r(p) = (u*p + v) / (p + w)
      and make a step to the root of this approximation: p_new such that
      r(p_new) = 0.
    The coefficients u, v and w are found from the bracket values p1, p2, p3
    and f1, f2, f3.
The algorithm and implementation follows
https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fpcurf.f#L229
and
https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fppara.f#L290
Note that the latter is for parametric splines and the former is for 1D spline
    functions. The minimization is identical though [modulo a summation over the
dimensions in the computation of f(p)], so we reuse the minimizer for both
d=1 and d>1.
"""
# Magic values from
# https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fpcurf.f#L27
con1 = 0.1
con9 = 0.9
con4 = 0.04
# bracketing flags (follow FITPACK)
# https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fppara.f#L365
ich1, ich3 = 0, 0
(p1, f1), (p3, f3) = bracket
p = p0
for it in range(MAXIT):
p2, f2 = p, f(p)
# c test whether the approximation sp(x) is an acceptable solution.
if abs(f2) < acc:
ier, converged = 0, True
break
# c carry out one more step of the iteration process.
if ich3 == 0:
if f2 - f3 <= acc:
# c our initial choice of p is too large.
p3 = p2
f3 = f2
p = p*con4
if p <= p1:
p = p1*con9 + p2*con1
continue
else:
if f2 < 0:
ich3 = 1
if ich1 == 0:
if f1 - f2 <= acc:
# c our initial choice of p is too small
p1 = p2
f1 = f2
p = p/con4
if p3 != np.inf and p <= p3:
p = p2*con1 + p3*con9
continue
else:
if f2 > 0:
ich1 = 1
# c test whether the iteration process proceeds as theoretically expected.
# [f(p) should be monotonically decreasing]
if f1 <= f2 or f2 <= f3:
ier, converged = 2, False
break
# actually make the iteration step
p = fprati(p1, f1, p2, f2, p3, f3)
# c adjust the value of p1,f1,p3 and f3 such that f1 > 0 and f3 < 0.
if f2 < 0:
p3, f3 = p2, f2
else:
p1, f1 = p2, f2
else:
# not converged in MAXIT iterations
ier, converged = 3, False
if ier != 0:
warnings.warn(RuntimeWarning(_iermesg[ier]), stacklevel=2)
return Bunch(converged=converged, root=p, iterations=it, ier=ier)
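# Usage sketch for `root_rati` (a hypothetical test function): f is monotone
# decreasing with f(0) > 0 > f(inf), and happens to be exactly rational, so
# the very first `fprati` step lands on the root p = 1.
def _demo_root_rati():
    import numpy as np

    def f(p):
        return 1.0 / (p + 1.0) - 0.5

    bracket = (0.0, f(0.0)), (np.inf, f(np.inf))
    res = root_rati(f, 0.5, bracket, acc=1e-12)
    return res.root    # == 1.0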
def _make_splrep_impl(x, y, *, w=None, xb=None, xe=None, k=3, s=0, t=None, nest=None):
"""Shared infra for make_splrep and make_splprep.
"""
acc = s * TOL
m = x.size # the number of data points
if nest is None:
# the max number of knots. This is set in _fitpack_impl.py line 274
# and fitpack.pyf line 198
nest = max(m + k + 1, 2*k + 3)
else:
if nest < 2*(k + 1):
raise ValueError(f"`nest` too small: {nest = } < 2*(k+1) = {2*(k+1)}.")
if t is not None:
raise ValueError("Either supply `t` or `nest`.")
if t is None:
gen = _generate_knots_impl(x, y, w=w, k=k, s=s, xb=xb, xe=xe, nest=nest)
t = list(gen)[-1]
else:
fpcheck(x, t, k)
if t.shape[0] == 2 * (k + 1):
# nothing to optimize
_, _, c = _lsq_solve_qr(x, y, t, k, w)
return BSpline(t, c, k)
### solve ###
# c initial value for p.
# https://github.com/scipy/scipy/blob/maintenance/1.11.x/scipy/interpolate/fitpack/fpcurf.f#L253
R, Y, _ = _lsq_solve_qr(x, y, t, k, w)
    nc = t.shape[0] - k - 1
p = nc / R[:, 0].sum()
# ### bespoke solver ####
# initial conditions
# f(p=inf) : LSQ spline with knots t (XXX: reuse R, c)
_, fp = _get_residuals(x, y, t, k, w=w)
fpinf = fp - s
# f(p=0): LSQ spline without internal knots
_, fp0 = _get_residuals(x, y, np.array([xb]*(k+1) + [xe]*(k+1)), k, w)
fp0 = fp0 - s
# solve
bracket = (0, fp0), (np.inf, fpinf)
f = F(x, y, t, k=k, s=s, w=w, R=R, Y=Y)
_ = root_rati(f, p, bracket, acc)
# solve ALTERNATIVE: is roughly equivalent, gives slightly different results
# starting from scratch, that would have probably been tolerable;
# backwards compatibility dictates that we replicate the FITPACK minimizer though.
# f = F(x, y, t, k=k, s=s, w=w, R=R, Y=Y)
# from scipy.optimize import root_scalar
# res_ = root_scalar(f, x0=p, rtol=acc)
# assert res_.converged
# f.spl is the spline corresponding to the found `p` value
return f.spl
def make_splrep(x, y, *, w=None, xb=None, xe=None, k=3, s=0, t=None, nest=None):
r"""Create a smoothing B-spline function with bounded error, minimizing derivative jumps.
Given the set of data points ``(x[i], y[i])``, determine a smooth spline
approximation of degree ``k`` on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like, shape (m,)
The data points defining a curve ``y = f(x)``.
w : array_like, shape (m,), optional
Strictly positive 1D array of weights, of the same length as `x` and `y`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector ``d``, then `w` should be ``1/d``.
Default is ``np.ones(m)``.
xb, xe : float, optional
The interval to fit. If None, these default to ``x[0]`` and ``x[-1]``,
respectively.
k : int, optional
The degree of the spline fit. It is recommended to use cubic splines,
``k=3``, which is the default. Even values of `k` should be avoided,
especially with small `s` values.
s : float, optional
The smoothing condition. The amount of smoothness is determined by
satisfying the LSQ (least-squares) constraint::
            sum((w * (g(x) - y))**2) <= s
where ``g(x)`` is the smoothed fit to ``(x, y)``. The user can use `s`
to control the tradeoff between closeness to data and smoothness of fit.
Larger `s` means more smoothing while smaller values of `s` indicate less
smoothing.
Recommended values of `s` depend on the weights, `w`. If the weights
represent the inverse of the standard deviation of `y`, then a good `s`
value should be found in the range ``(m-sqrt(2*m), m+sqrt(2*m))`` where
``m`` is the number of datapoints in `x`, `y`, and `w`.
Default is ``s = 0.0``, i.e. interpolation.
t : array_like, optional
The spline knots. If None (default), the knots will be constructed
automatically.
There must be at least ``2*k + 2`` and at most ``m + k + 1`` knots.
nest : int, optional
The target length of the knot vector. Should be between ``2*(k + 1)``
(the minimum number of knots for a degree-``k`` spline), and
``m + k + 1`` (the number of knots of the interpolating spline).
The actual number of knots returned by this routine may be slightly
larger than `nest`.
Default is None (no limit, add up to ``m + k + 1`` knots).
Returns
-------
spl : a `BSpline` instance
For `s=0`, ``spl(x) == y``.
For non-zero values of `s` the `spl` represents the smoothed approximation
to `(x, y)`, generally with fewer knots.
See Also
--------
generate_knots : is used under the hood for generating the knots
make_splprep : the analog of this routine for parametric curves
make_interp_spline : construct an interpolating spline (``s = 0``)
make_lsq_spline : construct the least-squares spline given the knot vector
splrep : a FITPACK analog of this routine
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Notes
-----
This routine constructs the smoothing spline function, :math:`g(x)`, to
minimize the sum of jumps, :math:`D_j`, of the ``k``-th derivative at the
internal knots (:math:`x_b < t_i < x_e`), where
.. math::
D_i = g^{(k)}(t_i + 0) - g^{(k)}(t_i - 0)
Specifically, the routine constructs the spline function :math:`g(x)` which
minimizes
.. math::
\sum_i | D_i |^2 \to \mathrm{min}
provided that
.. math::
\sum_{j=1}^m (w_j \times (g(x_j) - y_j))^2 \leqslant s ,
where :math:`s > 0` is the input parameter.
In other words, we balance maximizing the smoothness (measured as the jumps
of the derivative, the first criterion), and the deviation of :math:`g(x_j)`
from the data :math:`y_j` (the second criterion).
Note that the summation in the second criterion is over all data points,
and in the first criterion it is over the internal spline knots (i.e.
those with ``xb < t[i] < xe``). The spline knots are in general a subset
of data, see `generate_knots` for details.
Also note the difference of this routine to `make_lsq_spline`: the latter
routine does not consider smoothness and simply solves a least-squares
problem
.. math::
\sum w_j \times (g(x_j) - y_j)^2 \to \mathrm{min}
    for a spline function :math:`g(x)` with a *fixed* knot vector ``t``.
.. versionadded:: 1.15.0
""" # noqa:E501
if s == 0:
if t is not None or w is not None or nest is not None:
raise ValueError("s==0 is for interpolation only")
return make_interp_spline(x, y, k=k)
x, y, w, k, s, xb, xe = _validate_inputs(x, y, w, k, s, xb, xe, parametric=False)
spl = _make_splrep_impl(x, y, w=w, xb=xb, xe=xe, k=k, s=s, t=t, nest=nest)
# postprocess: squeeze out the last dimension: was added to simplify the internals.
spl.c = spl.c[:, 0]
return spl
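# Illustrative usage sketch (hypothetical data): with unit weights and noise
# of scale sigma, ``s ~ m * sigma**2`` is a reasonable first guess for the
# smoothing parameter.
def _demo_make_splrep():
    import numpy as np

    rng = np.random.default_rng(1234)
    sigma = 0.01
    x = np.linspace(0.0, 3.0, 61)
    y = np.exp(-x) + sigma * rng.standard_normal(x.size)
    spl = make_splrep(x, y, s=x.size * sigma**2)
    return spl(x)    # smoothed values; spl.t is much shorter than for s=0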
def make_splprep(x, *, w=None, u=None, ub=None, ue=None, k=3, s=0, t=None, nest=None):
r"""
Create a smoothing parametric B-spline curve with bounded error, minimizing derivative jumps.
Given a list of N 1D arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve ``g(u)``.
Parameters
----------
x : array_like, shape (m, ndim)
Sampled data points representing the curve in ``ndim`` dimensions.
The typical use is a list of 1D arrays, each of length ``m``.
w : array_like, shape(m,), optional
Strictly positive 1D array of weights.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard deviation given by
the vector d, then `w` should be 1/d. Default is ``np.ones(m)``.
u : array_like, optional
An array of parameter values for the curve in the parametric form.
If not given, these values are calculated automatically, according to::
v[0] = 0
v[i] = v[i-1] + distance(x[i], x[i-1])
u[i] = v[i] / v[-1]
    ub, ue : float, optional
        The end-points of the parameter interval. Default to ``u[0]``
        and ``u[-1]``.
    k : int, optional
        Degree of the spline. Cubic splines, ``k=3``, are recommended.
        Even values of `k` should be avoided, especially with a small ``s``
        value. Default is ``k=3``.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions::
sum((w * (g(u) - x))**2) <= s,
where ``g(u)`` is the smoothed approximation to ``x``. The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger ``s`` means more smoothing while smaller values of ``s``
indicate less smoothing.
Recommended values of ``s`` depend on the weights, ``w``. If the weights
represent the inverse of the standard deviation of ``x``, then a good
``s`` value should be found in the range ``(m - sqrt(2*m), m + sqrt(2*m))``,
where ``m`` is the number of data points in ``x`` and ``w``.
t : array_like, optional
The spline knots. If None (default), the knots will be constructed
automatically.
There must be at least ``2*k + 2`` and at most ``m + k + 1`` knots.
nest : int, optional
The target length of the knot vector. Should be between ``2*(k + 1)``
(the minimum number of knots for a degree-``k`` spline), and
``m + k + 1`` (the number of knots of the interpolating spline).
The actual number of knots returned by this routine may be slightly
larger than `nest`.
Default is None (no limit, add up to ``m + k + 1`` knots).
Returns
-------
spl : a `BSpline` instance
For `s=0`, ``spl(u) == x``.
For non-zero values of ``s``, `spl` represents the smoothed approximation
to ``x``, generally with fewer knots.
u : ndarray
The values of the parameters
See Also
--------
generate_knots : is used under the hood for generating the knots
    make_splrep : the analog of this routine for 1D functions
make_interp_spline : construct an interpolating spline (``s = 0``)
make_lsq_spline : construct the least-squares spline given the knot vector
splprep : a FITPACK analog of this routine
Notes
-----
Given a set of :math:`m` data points in :math:`D` dimensions, :math:`\vec{x}_j`,
with :math:`j=1, ..., m` and :math:`\vec{x}_j = (x_{j; 1}, ..., x_{j; D})`,
this routine constructs the parametric spline curve :math:`g_a(u)` with
:math:`a=1, ..., D`, to minimize the sum of jumps, :math:`D_{i; a}`, of the
``k``-th derivative at the internal knots (:math:`u_b < t_i < u_e`), where
.. math::
D_{i; a} = g_a^{(k)}(t_i + 0) - g_a^{(k)}(t_i - 0)
Specifically, the routine constructs the spline function :math:`g(u)` which
minimizes
.. math::
\sum_i \sum_{a=1}^D | D_{i; a} |^2 \to \mathrm{min}
provided that
.. math::
\sum_{j=1}^m \sum_{a=1}^D (w_j \times (g_a(u_j) - x_{j; a}))^2 \leqslant s
where :math:`u_j` is the value of the parameter corresponding to the data point
:math:`(x_{j; 1}, ..., x_{j; D})`, and :math:`s > 0` is the input parameter.
In other words, we balance maximizing the smoothness (measured as the jumps
of the derivative, the first criterion), and the deviation of :math:`g(u_j)`
from the data :math:`x_j` (the second criterion).
Note that the summation in the second criterion is over all data points,
and in the first criterion it is over the internal spline knots (i.e.
those with ``ub < t[i] < ue``). The spline knots are in general a subset
of data, see `generate_knots` for details.
.. versionadded:: 1.15.0
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
""" # noqa:E501
x = np.stack(x, axis=1)
# construct the default parametrization of the curve
if u is None:
dp = (x[1:, :] - x[:-1, :])**2
u = np.sqrt((dp).sum(axis=1)).cumsum()
u = np.r_[0, u / u[-1]]
if s == 0:
if t is not None or w is not None or nest is not None:
raise ValueError("s==0 is for interpolation only")
return make_interp_spline(u, x.T, k=k, axis=1), u
u, x, w, k, s, ub, ue = _validate_inputs(u, x, w, k, s, ub, ue, parametric=True)
spl = _make_splrep_impl(u, x, w=w, xb=ub, xe=ue, k=k, s=s, t=t, nest=nest)
    # postprocess: set `axis=1` so that spl(u).shape == np.shape(x)
    # when `x` is a list of 1D arrays (cf the original splprep)
cc = spl.c.T
spl1 = BSpline(spl.t, cc, spl.k, axis=1)
return spl1, u
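# Illustrative usage sketch (hypothetical data): smooth a noisy planar curve;
# `spl(u)` has shape (2, m) because the spline is constructed with ``axis=1``.
def _demo_make_splprep():
    import numpy as np

    rng = np.random.default_rng(5)
    theta = np.linspace(0.0, 2.0 * np.pi, 100)
    x = np.cos(theta) + 0.02 * rng.standard_normal(100)
    y = np.sin(theta) + 0.02 * rng.standard_normal(100)
    spl, u = make_splprep([x, y], s=0.1)
    xs, ys = spl(u)
    return xs, ys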

File diff suppressed because it is too large


@ -0,0 +1,415 @@
import itertools
import functools
import operator
import numpy as np
from math import prod
from . import _dierckx # type: ignore[attr-defined]
import scipy.sparse.linalg as ssl
from scipy.sparse import csr_array
from ._bsplines import _not_a_knot
__all__ = ["NdBSpline"]
def _get_dtype(dtype):
"""Return np.complex128 for complex dtypes, np.float64 otherwise."""
if np.issubdtype(dtype, np.complexfloating):
return np.complex128
else:
return np.float64
class NdBSpline:
"""Tensor product spline object.
The value at point ``xp = (x1, x2, ..., xN)`` is evaluated as a linear
combination of products of one-dimensional b-splines in each of the ``N``
dimensions::
c[i1, i2, ..., iN] * B(x1; i1, t1) * B(x2; i2, t2) * ... * B(xN; iN, tN)
Here ``B(x; i, t)`` is the ``i``-th b-spline defined by the knot vector
``t`` evaluated at ``x``.
Parameters
----------
t : tuple of 1D ndarrays
knot vectors in directions 1, 2, ... N,
``len(t[i]) == n[i] + k + 1``
c : ndarray, shape (n1, n2, ..., nN, ...)
b-spline coefficients
k : int or length-d tuple of integers
spline degrees.
A single integer is interpreted as having this degree for
all dimensions.
extrapolate : bool, optional
Whether to extrapolate out-of-bounds inputs, or return `nan`.
Default is to extrapolate.
Attributes
----------
t : tuple of ndarrays
Knots vectors.
c : ndarray
Coefficients of the tensor-product spline.
k : tuple of integers
Degrees for each dimension.
    extrapolate : bool
        Whether to extrapolate or return nans for out-of-bounds inputs.
        Defaults to True.
Methods
-------
__call__
design_matrix
See Also
--------
BSpline : a one-dimensional B-spline object
NdPPoly : an N-dimensional piecewise tensor product polynomial
"""
def __init__(self, t, c, k, *, extrapolate=None):
self._k, self._indices_k1d, (self._t, self._len_t) = _preprocess_inputs(k, t)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
self.c = np.asarray(c)
ndim = self._t.shape[0] # == len(self.t)
if self.c.ndim < ndim:
raise ValueError(f"Coefficients must be at least {ndim}-dimensional.")
for d in range(ndim):
td = self.t[d]
kd = self.k[d]
n = td.shape[0] - kd - 1
if self.c.shape[d] != n:
raise ValueError(f"Knots, coefficients and degree in dimension"
f" {d} are inconsistent:"
f" got {self.c.shape[d]} coefficients for"
f" {len(td)} knots, need at least {n} for"
f" k={k}.")
dt = _get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dt)
@property
def k(self):
return tuple(self._k)
@property
def t(self):
# repack the knots into a tuple
return tuple(self._t[d, :self._len_t[d]] for d in range(self._t.shape[0]))
def __call__(self, xi, *, nu=None, extrapolate=None):
"""Evaluate the tensor product b-spline at ``xi``.
Parameters
----------
xi : array_like, shape(..., ndim)
The coordinates to evaluate the interpolator at.
This can be a list or tuple of ndim-dimensional points
or an array with the shape (num_points, ndim).
        nu : array_like, optional, shape (ndim,)
            Orders of derivatives to evaluate. Each must be non-negative.
            Defaults to the zeroth derivative.
        extrapolate : bool, optional
            Whether to extrapolate based on first and last intervals in each
            dimension, or return `nan`. Defaults to ``self.extrapolate``.
Returns
-------
values : ndarray, shape ``xi.shape[:-1] + self.c.shape[ndim:]``
Interpolated values at ``xi``
"""
ndim = self._t.shape[0] # == len(self.t)
if extrapolate is None:
extrapolate = self.extrapolate
extrapolate = bool(extrapolate)
if nu is None:
nu = np.zeros((ndim,), dtype=np.int64)
else:
nu = np.asarray(nu, dtype=np.int64)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError(
f"invalid number of derivative orders {nu = } for "
f"ndim = {len(self.t)}.")
        if any(nu < 0):
            raise ValueError(f"derivatives must be non-negative, got {nu = }")
# prepare xi : shape (..., m1, ..., md) -> (1, m1, ..., md)
xi = np.asarray(xi, dtype=float)
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
xi = np.ascontiguousarray(xi)
if xi_shape[-1] != ndim:
raise ValueError(f"Shapes: xi.shape={xi_shape} and ndim={ndim}")
# complex -> double
was_complex = self.c.dtype.kind == 'c'
cc = self.c
if was_complex and self.c.ndim == ndim:
# make sure that core dimensions are intact, and complex->float
# size doubling only adds a trailing dimension
cc = self.c[..., None]
cc = cc.view(float)
# prepare the coefficients: flatten the trailing dimensions
c1 = cc.reshape(cc.shape[:ndim] + (-1,))
c1r = c1.ravel()
# replacement for np.ravel_multi_index for indexing of `c1`:
_strides_c1 = np.asarray([s // c1.dtype.itemsize
for s in c1.strides], dtype=np.int64)
num_c_tr = c1.shape[-1] # # of trailing coefficients
out = _dierckx.evaluate_ndbspline(xi,
self._t,
self._len_t,
self._k,
nu,
extrapolate,
c1r,
num_c_tr,
_strides_c1,
self._indices_k1d,
)
out = out.view(self.c.dtype)
return out.reshape(xi_shape[:-1] + self.c.shape[ndim:])
@classmethod
def design_matrix(cls, xvals, t, k, extrapolate=True):
"""Construct the design matrix as a CSR format sparse array.
Parameters
----------
xvals : ndarray, shape(npts, ndim)
Data points. ``xvals[j, :]`` gives the ``j``-th data point as an
``ndim``-dimensional array.
t : tuple of 1D ndarrays, length-ndim
Knot vectors in directions 1, 2, ... ndim,
k : int
B-spline degree.
extrapolate : bool, optional
            Whether to extrapolate out-of-bounds values or raise a `ValueError`.
Returns
-------
design_matrix : a CSR array
Each row of the design matrix corresponds to a value in `xvals` and
contains values of b-spline basis elements which are non-zero
at this value.
"""
xvals = np.asarray(xvals, dtype=float)
ndim = xvals.shape[-1]
if len(t) != ndim:
raise ValueError(
f"Data and knots are inconsistent: len(t) = {len(t)} for "
f" {ndim = }."
)
# tabulate the flat indices for iterating over the (k+1)**ndim subarray
k, _indices_k1d, (_t, len_t) = _preprocess_inputs(k, t)
        # Precompute the shape and strides of the 'coefficients array'.
        # This would have been the NdBSpline coefficients; in the present
        # context this is a helper to compute the indices into the
        # collocation matrix.
c_shape = tuple(len_t[d] - k[d] - 1 for d in range(ndim))
# The strides of the coeffs array: the computation is equivalent to
# >>> cstrides = [s // 8 for s in np.empty(c_shape).strides]
cs = c_shape[1:] + (1,)
cstrides = np.cumprod(cs[::-1], dtype=np.int64)[::-1].copy()
# heavy lifting happens here
data, indices, indptr = _dierckx._coloc_nd(xvals,
_t, len_t, k, _indices_k1d, cstrides)
return csr_array((data, indices, indptr))
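# Minimal usage sketch (hypothetical): a bicubic tensor-product spline with
# no internal knots and all-ones coefficients evaluates to 1 everywhere on
# the base rectangle, by the partition of unity.
def _demo_ndbspline():
    import numpy as np

    k = 3
    tx = np.r_[[0.0] * (k + 1), [1.0] * (k + 1)]
    ty = tx.copy()
    c = np.ones((tx.size - k - 1, ty.size - k - 1))    # 4 x 4 coefficients
    spl = NdBSpline((tx, ty), c, k)
    return spl([[0.5, 0.5]])    # array([1.])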
def _preprocess_inputs(k, t_tpl):
"""Helpers: validate and preprocess NdBSpline inputs.
Parameters
----------
k : int or tuple
Spline orders
    t_tpl : tuple of array-likes
        Knots.
"""
# 1. Make sure t_tpl is a tuple
if not isinstance(t_tpl, tuple):
raise ValueError(f"Expect `t` to be a tuple of array-likes. "
f"Got {t_tpl} instead."
)
# 2. Make ``k`` a tuple of integers
ndim = len(t_tpl)
try:
len(k)
except TypeError:
# make k a tuple
k = (k,)*ndim
k = np.asarray([operator.index(ki) for ki in k], dtype=np.int64)
if len(k) != ndim:
raise ValueError(f"len(t) = {len(t_tpl)} != {len(k) = }.")
# 3. Validate inputs
ndim = len(t_tpl)
for d in range(ndim):
td = np.asarray(t_tpl[d])
kd = k[d]
n = td.shape[0] - kd - 1
if kd < 0:
raise ValueError(f"Spline degree in dimension {d} cannot be"
f" negative.")
if td.ndim != 1:
raise ValueError(f"Knot vector in dimension {d} must be"
f" one-dimensional.")
if n < kd + 1:
raise ValueError(f"Need at least {2*kd + 2} knots for degree"
f" {kd} in dimension {d}.")
if (np.diff(td) < 0).any():
raise ValueError(f"Knots in dimension {d} must be in a"
f" non-decreasing order.")
if len(np.unique(td[kd:n + 1])) < 2:
raise ValueError(f"Need at least two internal knots in"
f" dimension {d}.")
if not np.isfinite(td).all():
raise ValueError(f"Knots in dimension {d} should not have"
f" nans or infs.")
# 4. tabulate the flat indices for iterating over the (k+1)**ndim subarray
# non-zero b-spline elements
shape = tuple(kd + 1 for kd in k)
indices = np.unravel_index(np.arange(prod(shape)), shape)
_indices_k1d = np.asarray(indices, dtype=np.int64).T.copy()
# 5. pack the knots into a single array:
# ([1, 2, 3, 4], [5, 6], (7, 8, 9)) -->
# array([[1, 2, 3, 4],
# [5, 6, nan, nan],
# [7, 8, 9, nan]])
ndim = len(t_tpl)
len_t = [len(ti) for ti in t_tpl]
_t = np.empty((ndim, max(len_t)), dtype=float)
_t.fill(np.nan)
for d in range(ndim):
_t[d, :len(t_tpl[d])] = t_tpl[d]
len_t = np.asarray(len_t, dtype=np.int64)
return k, _indices_k1d, (_t, len_t)
def _iter_solve(a, b, solver=ssl.gcrotmk, **solver_args):
# work around iterative solvers not accepting multiple r.h.s.
# also work around a.dtype == float64 and b.dtype == complex128
# cf https://github.com/scipy/scipy/issues/19644
if np.issubdtype(b.dtype, np.complexfloating):
real = _iter_solve(a, b.real, solver, **solver_args)
imag = _iter_solve(a, b.imag, solver, **solver_args)
return real + 1j*imag
    if b.ndim == 2 and b.shape[1] != 1:
res = np.empty_like(b)
for j in range(b.shape[1]):
res[:, j], info = solver(a, b[:, j], **solver_args)
if info != 0:
raise ValueError(f"{solver = } returns {info =} for column {j}.")
return res
else:
res, info = solver(a, b, **solver_args)
if info != 0:
raise ValueError(f"{solver = } returns {info = }.")
return res
def make_ndbspl(points, values, k=3, *, solver=ssl.gcrotmk, **solver_args):
"""Construct an interpolating NdBspline.
Parameters
----------
points : tuple of ndarrays of float, with shapes (m1,), ... (mN,)
The points defining the regular grid in N dimensions. The points in
each dimension (i.e. every element of the `points` tuple) must be
strictly ascending or descending.
values : ndarray of float, shape (m1, ..., mN, ...)
The data on the regular grid in n dimensions.
k : int, optional
The spline degree. Must be odd. Default is cubic, k=3
solver : a `scipy.sparse.linalg` solver (iterative or direct), optional.
        An iterative solver from `scipy.sparse.linalg` or a direct one,
        `scipy.sparse.linalg.spsolve`.
Used to solve the sparse linear system
``design_matrix @ coefficients = rhs`` for the coefficients.
Default is `scipy.sparse.linalg.gcrotmk`
solver_args : dict, optional
Additional arguments for the solver. The call signature is
``solver(csr_array, rhs_vector, **solver_args)``
Returns
-------
spl : NdBSpline object
Notes
-----
Boundary conditions are not-a-knot in all dimensions.
"""
ndim = len(points)
xi_shape = tuple(len(x) for x in points)
try:
len(k)
except TypeError:
# make k a tuple
k = (k,)*ndim
for d, point in enumerate(points):
numpts = len(np.atleast_1d(point))
if numpts <= k[d]:
raise ValueError(f"There are {numpts} points in dimension {d},"
f" but order {k[d]} requires at least "
f" {k[d]+1} points per dimension.")
t = tuple(_not_a_knot(np.asarray(points[d], dtype=float), k[d])
for d in range(ndim))
xvals = np.asarray([xv for xv in itertools.product(*points)], dtype=float)
    # construct the collocation matrix
matr = NdBSpline.design_matrix(xvals, t, k)
# Solve for the coefficients given `values`.
    # Trailing dimensions: first ndim dimensions are data, the rest are batch
    # dimensions, so stack `values` into a 2D array for `spsolve` to understand.
v_shape = values.shape
vals_shape = (prod(v_shape[:ndim]), prod(v_shape[ndim:]))
vals = values.reshape(vals_shape)
if solver != ssl.spsolve:
solver = functools.partial(_iter_solve, solver=solver)
if "atol" not in solver_args:
# avoid a DeprecationWarning, grumble grumble
solver_args["atol"] = 1e-6
coef = solver(matr, vals, **solver_args)
coef = coef.reshape(xi_shape + v_shape[ndim:])
return NdBSpline(t, coef, k)
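# Usage sketch for `make_ndbspl` (hypothetical grid data): interpolate a
# separable function on an 8 x 9 grid and evaluate off-grid.
def _demo_make_ndbspl():
    import numpy as np

    xs = np.linspace(0.0, 1.0, 8)
    ys = np.linspace(0.0, 2.0, 9)
    vals = np.cos(xs)[:, None] * np.sin(ys)[None, :]
    spl = make_ndbspl((xs, ys), vals, k=3)
    return spl([[0.5, 1.0]])    # approximately cos(0.5) * sin(1.0)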


@ -0,0 +1,329 @@
"""
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
import numpy as np
from ._interpnd import (LinearNDInterpolator, NDInterpolatorBase,
CloughTocher2DInterpolator, _ndim_coords_from_arrays)
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbor interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""Nearest-neighbor interpolator in N > 1 dimensions.
Methods
-------
__call__
Parameters
----------
x : (npoints, ndims) 2-D ndarray of floats
Data point coordinates.
y : (npoints, ) 1-D ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
tree_options : dict, optional
Options passed to the underlying ``cKDTree``.
.. versionadded:: 0.17.0
See Also
--------
griddata :
Interpolate unstructured D-D data.
LinearNDInterpolator :
Piecewise linear interpolator in N dimensions.
CloughTocher2DInterpolator :
Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
interpn : Interpolation on a regular grid or rectilinear grid.
RegularGridInterpolator : Interpolator on a regular or rectilinear grid
in arbitrary dimensions (`interpn` wraps this
class).
Notes
-----
Uses ``scipy.spatial.cKDTree``
.. note:: For data on a regular grid use `interpn` instead.
Examples
--------
We can interpolate values on a 2D plane:
>>> from scipy.interpolate import NearestNDInterpolator
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> x = rng.random(10) - 0.5
>>> y = rng.random(10) - 0.5
>>> z = np.hypot(x, y)
>>> X = np.linspace(min(x), max(x))
>>> Y = np.linspace(min(y), max(y))
>>> X, Y = np.meshgrid(X, Y) # 2D grid for interpolation
>>> interp = NearestNDInterpolator(list(zip(x, y)), z)
>>> Z = interp(X, Y)
>>> plt.pcolormesh(X, Y, Z, shading='auto')
>>> plt.plot(x, y, "ok", label="input point")
>>> plt.legend()
>>> plt.colorbar()
>>> plt.axis("equal")
>>> plt.show()
"""
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
self.values = np.asarray(y)
def __call__(self, *args, **query_options):
"""
Evaluate interpolator at given points.
Parameters
----------
        x1, x2, ... xn : array-like of float
            Points where to interpolate data at.
            x1, x2, ... xn can be array-like of float with broadcastable shape,
            or x1 can be array-like of float with shape ``(..., ndim)``.
**query_options
This allows ``eps``, ``p``, ``distance_upper_bound``, and ``workers``
being passed to the cKDTree's query function to be explicitly set.
See `scipy.spatial.cKDTree.query` for an overview of the different options.
.. versionadded:: 1.12.0
"""
# For the sake of enabling subclassing, NDInterpolatorBase._set_xi performs
# some operations which are not required by NearestNDInterpolator.__call__,
# hence here we operate on xi directly, without calling a parent class function.
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
# We need to handle two important cases:
# (1) the case where xi has trailing dimensions (..., ndim), and
# (2) the case where y has trailing dimensions
# We will first flatten xi to deal with case (1),
# do the computation in flattened array while retaining y's dimensionality,
# and then reshape the interpolated values back to match xi's shape.
# Flatten xi for the query
xi_flat = xi.reshape(-1, xi.shape[-1])
original_shape = xi.shape
flattened_shape = xi_flat.shape
        # if distance_upper_bound is set to not be infinite,
        # then we need to consider the case where cKDTree
        # does not find any points within distance_upper_bound to return.
        # It marks those points as having infinite distance, which is what will be used
# below to mask the array and return only the points that were deemed
# to have a close enough neighbor to return something useful.
dist, i = self.tree.query(xi_flat, **query_options)
valid_mask = np.isfinite(dist)
# create a holder interp_values array and fill with nans.
if self.values.ndim > 1:
interp_shape = flattened_shape[:-1] + self.values.shape[1:]
else:
interp_shape = flattened_shape[:-1]
if np.issubdtype(self.values.dtype, np.complexfloating):
interp_values = np.full(interp_shape, np.nan, dtype=self.values.dtype)
else:
interp_values = np.full(interp_shape, np.nan)
interp_values[valid_mask] = self.values[i[valid_mask], ...]
if self.values.ndim > 1:
new_shape = original_shape[:-1] + self.values.shape[1:]
else:
new_shape = original_shape[:-1]
interp_values = interp_values.reshape(new_shape)
return interp_values
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Convenience function for interpolating unstructured data in multiple dimensions.
Parameters
----------
points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
Data point coordinates.
values : ndarray of float or complex, shape (n,)
Data values.
xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tessellate the input point set to N-D
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Returns
-------
ndarray
Array of interpolated values.
See Also
--------
LinearNDInterpolator :
Piecewise linear interpolator in N dimensions.
NearestNDInterpolator :
Nearest-neighbor interpolator in N dimensions.
CloughTocher2DInterpolator :
Piecewise cubic, C1 smooth, curvature-minimizing interpolator in 2D.
interpn : Interpolation on a regular grid or rectilinear grid.
RegularGridInterpolator : Interpolator on a regular or rectilinear grid
in arbitrary dimensions (`interpn` wraps this
class).
Notes
-----
.. versionadded:: 0.9
.. note:: For data on a regular grid use `interpn` instead.
Examples
--------
Suppose we want to interpolate the 2-D function
>>> import numpy as np
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> rng = np.random.default_rng()
>>> points = rng.random((1000, 2))
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
""" # numpy/numpydoc#87 # noqa: E501
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from ._interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
if method == 'nearest':
fill_value = 'extrapolate'
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError(
f"Unknown interpolation method {method!r} for {ndim} dimensional data"
)


@ -0,0 +1,67 @@
from numpy import zeros, asarray, eye, poly1d, hstack, r_
from scipy import linalg
__all__ = ["pade"]
def pade(an, m, n=None):
"""
Return Pade approximation to a polynomial as the ratio of two polynomials.
Parameters
----------
an : (N,) array_like
Taylor series coefficients.
m : int
The order of the returned approximating polynomial `q`.
n : int, optional
The order of the returned approximating polynomial `p`. By default,
the order is ``len(an)-1-m``.
Returns
-------
p, q : Polynomial class
The Pade approximation of the polynomial defined by `an` is
``p(x)/q(x)``.
Examples
--------
>>> import numpy as np
>>> from scipy.interpolate import pade
>>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
>>> p, q = pade(e_exp, 2)
>>> e_exp.reverse()
>>> e_poly = np.poly1d(e_exp)
Compare ``e_poly(x)`` and the Pade approximation ``p(x)/q(x)``
>>> e_poly(1)
2.7166666666666668
>>> p(1)/q(1)
2.7179487179487181
"""
    an = asarray(an)
    if n is None:
        n = len(an) - 1 - m
        if n < 0:
            raise ValueError("Order of q <m> must be smaller than len(an)-1.")
    if n < 0:
        raise ValueError("Order of p <n> must be greater than 0.")
N = m + n
if N > len(an)-1:
raise ValueError("Order of q+p <m+n> must be smaller than len(an).")
an = an[:N+1]
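    # Matching the first N+1 Taylor coefficients of p(x)/q(x), with q
    # normalized to q[0] = 1, gives the linear system C @ [p, q[1:]] = an;
    # the two blocks below are the p-columns (identity) and the q-columns.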
Akj = eye(N+1, n+1, dtype=an.dtype)
Bkj = zeros((N+1, m), dtype=an.dtype)
for row in range(1, m+1):
Bkj[row,:row] = -(an[:row])[::-1]
for row in range(m+1, N+1):
Bkj[row,:] = -(an[row-m:row])[::-1]
C = hstack((Akj, Bkj))
pq = linalg.solve(C, an)
p = pq[:n+1]
q = r_[1.0, pq[n+1:]]
return poly1d(p[::-1]), poly1d(q[::-1])

File diff suppressed because it is too large


@ -0,0 +1,290 @@
"""rbf - Radial basis functions for interpolation/smoothing scattered N-D data.
Written by John Travers <jtravs@gmail.com>, February 2007
Based closely on Matlab code by Alex Chirokov
Additional, large, improvements by Robert Hetland
Some additional alterations by Travis Oliphant
Interpolation with multi-dimensional target domain by Josua Sassen
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Copyright (c) 2006-2007, Robert Hetland <hetland@tamu.edu>
Copyright (c) 2007, John Travers <jtravs@gmail.com>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Robert Hetland nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
from scipy import linalg
from scipy.special import xlogy
from scipy.spatial.distance import cdist, pdist, squareform
__all__ = ['Rbf']
class Rbf:
"""
Rbf(*args, **kwargs)
Class for radial basis function interpolation of functions from
N-D scattered data to an M-D domain (legacy).
.. legacy:: class
`Rbf` is legacy code, for new usage please use `RBFInterpolator`
instead.
Parameters
----------
*args : arrays
x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
and d is the array of values at the nodes
function : str or callable, optional
The radial basis function, based on the radius, r, given by the norm
(default is Euclidean distance); the default is 'multiquadric'::
'multiquadric': sqrt((r/self.epsilon)**2 + 1)
'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
'gaussian': exp(-(r/self.epsilon)**2)
'linear': r
'cubic': r**3
'quintic': r**5
'thin_plate': r**2 * log(r)
If callable, then it must take 2 arguments (self, r). The epsilon
parameter will be available as self.epsilon. Other keyword
arguments passed in will be available as well.
epsilon : float, optional
Adjustable constant for gaussian or multiquadrics functions
- defaults to approximate average distance between nodes (which is
a good start).
smooth : float, optional
Values greater than zero increase the smoothness of the
approximation. 0 is for interpolation (default), the function will
always go through the nodal points in this case.
norm : str, callable, optional
A function that returns the 'distance' between two points, with
inputs as arrays of positions (x, y, z, ...), and an output as an
array of distance. E.g., the default: 'euclidean', such that the result
is a matrix of the distances from each point in ``x1`` to each point in
``x2``. For more options, see documentation of
        `scipy.spatial.distance.cdist`.
mode : str, optional
Mode of the interpolation, can be '1-D' (default) or 'N-D'. When it is
'1-D' the data `d` will be considered as 1-D and flattened
internally. When it is 'N-D' the data `d` is assumed to be an array of
shape (n_samples, m), where m is the dimension of the target domain.
Attributes
----------
N : int
The number of data points (as determined by the input arrays).
di : ndarray
The 1-D array of data values at each of the data coordinates `xi`.
xi : ndarray
The 2-D array of data coordinates.
function : str or callable
The radial basis function. See description under Parameters.
epsilon : float
Parameter used by gaussian or multiquadrics functions. See Parameters.
smooth : float
Smoothing parameter. See description under Parameters.
norm : str or callable
The distance function. See description under Parameters.
mode : str
Mode of the interpolation. See description under Parameters.
nodes : ndarray
A 1-D array of node values for the interpolation.
A : internal property, do not use
See Also
--------
RBFInterpolator
Examples
--------
>>> import numpy as np
>>> from scipy.interpolate import Rbf
>>> rng = np.random.default_rng()
>>> x, y, z, d = rng.random((4, 50))
>>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance
>>> xi = yi = zi = np.linspace(0, 1, 20)
>>> di = rbfi(xi, yi, zi) # interpolated values
>>> di.shape
(20,)
"""
# Available radial basis functions that can be selected as strings;
# they all start with _h_ (self._init_function relies on that)
def _h_multiquadric(self, r):
return np.sqrt((1.0/self.epsilon*r)**2 + 1)
def _h_inverse_multiquadric(self, r):
return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1)
def _h_gaussian(self, r):
return np.exp(-(1.0/self.epsilon*r)**2)
def _h_linear(self, r):
return r
def _h_cubic(self, r):
return r**3
def _h_quintic(self, r):
return r**5
def _h_thin_plate(self, r):
return xlogy(r**2, r)
# Setup self._function and do smoke test on initial r
def _init_function(self, r):
if isinstance(self.function, str):
self.function = self.function.lower()
_mapped = {'inverse': 'inverse_multiquadric',
'inverse multiquadric': 'inverse_multiquadric',
'thin-plate': 'thin_plate'}
if self.function in _mapped:
self.function = _mapped[self.function]
func_name = "_h_" + self.function
if hasattr(self, func_name):
self._function = getattr(self, func_name)
else:
functionlist = [x[3:] for x in dir(self)
if x.startswith('_h_')]
raise ValueError("function must be a callable or one of " +
", ".join(functionlist))
self._function = getattr(self, "_h_"+self.function)
elif callable(self.function):
allow_one = False
if hasattr(self.function, 'func_code') or \
hasattr(self.function, '__code__'):
val = self.function
allow_one = True
elif hasattr(self.function, "__call__"):
val = self.function.__call__.__func__
else:
raise ValueError("Cannot determine number of arguments to "
"function")
argcount = val.__code__.co_argcount
if allow_one and argcount == 1:
self._function = self.function
elif argcount == 2:
self._function = self.function.__get__(self, Rbf)
else:
raise ValueError("Function argument must take 1 or 2 "
"arguments.")
a0 = self._function(r)
if a0.shape != r.shape:
raise ValueError("Callable must take array and return array of "
"the same shape")
return a0
def __init__(self, *args, **kwargs):
# `args` can be a variable number of arrays; we flatten them and store
# them as a single 2-D array `xi` of shape (n_args-1, array_size),
# plus a 1-D array `di` for the values.
# All arrays must have the same number of elements
self.xi = np.asarray([np.asarray(a, dtype=np.float64).flatten()
for a in args[:-1]])
self.N = self.xi.shape[-1]
self.mode = kwargs.pop('mode', '1-D')
if self.mode == '1-D':
self.di = np.asarray(args[-1]).flatten()
self._target_dim = 1
elif self.mode == 'N-D':
self.di = np.asarray(args[-1])
self._target_dim = self.di.shape[-1]
else:
raise ValueError("Mode has to be 1-D or N-D.")
if not all([x.size == self.di.shape[0] for x in self.xi]):
raise ValueError("All arrays must be equal length.")
self.norm = kwargs.pop('norm', 'euclidean')
self.epsilon = kwargs.pop('epsilon', None)
if self.epsilon is None:
# default epsilon is the "the average distance between nodes" based
# on a bounding hypercube
ximax = np.amax(self.xi, axis=1)
ximin = np.amin(self.xi, axis=1)
edges = ximax - ximin
edges = edges[np.nonzero(edges)]
self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size)
self.smooth = kwargs.pop('smooth', 0.0)
self.function = kwargs.pop('function', 'multiquadric')
# attach anything left in kwargs to self for use by any user-callable
# function or to save on the object returned.
for item, value in kwargs.items():
setattr(self, item, value)
# Compute weights
if self._target_dim > 1: # If we have more than one target dimension,
# we first factorize the matrix
self.nodes = np.zeros((self.N, self._target_dim), dtype=self.di.dtype)
lu, piv = linalg.lu_factor(self.A)
for i in range(self._target_dim):
self.nodes[:, i] = linalg.lu_solve((lu, piv), self.di[:, i])
else:
self.nodes = linalg.solve(self.A, self.di)
@property
def A(self):
# this only exists for backwards compatibility: self.A was available
# and, at least technically, public.
r = squareform(pdist(self.xi.T, self.norm)) # Pairwise norm
return self._init_function(r) - np.eye(self.N)*self.smooth
def _call_norm(self, x1, x2):
return cdist(x1.T, x2.T, self.norm)
def __call__(self, *args):
args = [np.asarray(x) for x in args]
if not all([x.shape == y.shape for x in args for y in args]):
raise ValueError("Array lengths must be equal")
if self._target_dim > 1:
shp = args[0].shape + (self._target_dim,)
else:
shp = args[0].shape
xa = np.asarray([a.flatten() for a in args], dtype=np.float64)
r = self._call_norm(xa, self.xi)
return np.dot(self._function(r), self.nodes).reshape(shp)

@@ -0,0 +1,550 @@
"""Module for RBF interpolation."""
import warnings
from itertools import combinations_with_replacement
import numpy as np
from numpy.linalg import LinAlgError
from scipy.spatial import KDTree
from scipy.special import comb
from scipy.linalg.lapack import dgesv # type: ignore[attr-defined]
from ._rbfinterp_pythran import (_build_system,
_build_evaluation_coefficients,
_polynomial_matrix)
__all__ = ["RBFInterpolator"]
# These RBFs are implemented.
_AVAILABLE = {
"linear",
"thin_plate_spline",
"cubic",
"quintic",
"multiquadric",
"inverse_multiquadric",
"inverse_quadratic",
"gaussian"
}
# The shape parameter does not need to be specified when using these RBFs.
_SCALE_INVARIANT = {"linear", "thin_plate_spline", "cubic", "quintic"}
# For RBFs that are conditionally positive definite of order m, the interpolant
# should include polynomial terms with degree >= m - 1. Define the minimum
# degrees here. These values are from Chapter 8 of Fasshauer's "Meshfree
# Approximation Methods with MATLAB". The RBFs that are not in this dictionary
# are positive definite and do not need polynomial terms.
_NAME_TO_MIN_DEGREE = {
"multiquadric": 0,
"linear": 0,
"thin_plate_spline": 1,
"cubic": 1,
"quintic": 2
}
def _monomial_powers(ndim, degree):
"""Return the powers for each monomial in a polynomial.
Parameters
----------
ndim : int
Number of variables in the polynomial.
degree : int
Degree of the polynomial.
Returns
-------
(nmonos, ndim) int ndarray
Array where each row contains the powers for each variable in a
monomial.
"""
nmonos = comb(degree + ndim, ndim, exact=True)
out = np.zeros((nmonos, ndim), dtype=np.dtype("long"))
count = 0
for deg in range(degree + 1):
for mono in combinations_with_replacement(range(ndim), deg):
# `mono` is a tuple of variables in the current monomial with
# multiplicity indicating power (e.g., (0, 1, 1) represents x*y**2)
for var in mono:
out[count, var] += 1
count += 1
return out
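# A quick illustration (not part of the library code): _monomial_powers(2, 1)
# returns [[0, 0], [1, 0], [0, 1]], i.e. the monomials 1, x0 and x1.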
def _build_and_solve_system(y, d, smoothing, kernel, epsilon, powers):
"""Build and solve the RBF interpolation system of equations.
Parameters
----------
y : (P, N) float ndarray
Data point coordinates.
d : (P, S) float ndarray
Data values at `y`.
smoothing : (P,) float ndarray
Smoothing parameter for each data point.
kernel : str
Name of the RBF.
epsilon : float
Shape parameter.
powers : (R, N) int ndarray
The exponents for each monomial in the polynomial.
Returns
-------
coeffs : (P + R, S) float ndarray
Coefficients for each RBF and monomial.
shift : (N,) float ndarray
Domain shift used to create the polynomial matrix.
scale : (N,) float ndarray
Domain scaling used to create the polynomial matrix.
"""
lhs, rhs, shift, scale = _build_system(
y, d, smoothing, kernel, epsilon, powers
)
_, _, coeffs, info = dgesv(lhs, rhs, overwrite_a=True, overwrite_b=True)
if info < 0:
raise ValueError(f"The {-info}-th argument had an illegal value.")
elif info > 0:
msg = "Singular matrix."
nmonos = powers.shape[0]
if nmonos > 0:
pmat = _polynomial_matrix((y - shift)/scale, powers)
rank = np.linalg.matrix_rank(pmat)
if rank < nmonos:
msg = (
"Singular matrix. The matrix of monomials evaluated at "
"the data point coordinates does not have full column "
f"rank ({rank}/{nmonos})."
)
raise LinAlgError(msg)
return shift, scale, coeffs
class RBFInterpolator:
"""Radial basis function interpolator in N ≥ 1 dimensions.
Parameters
----------
y : (npoints, ndims) array_like
2-D array of data point coordinates.
d : (npoints, ...) array_like
N-D array of data values at `y`. The length of `d` along the first
axis must be equal to the length of `y`. Unlike some interpolators, the
interpolation axis cannot be changed.
neighbors : int, optional
If specified, the value of the interpolant at each evaluation point
will be computed using only this many nearest data points. All the data
points are used by default.
smoothing : float or (npoints, ) array_like, optional
Smoothing parameter. The interpolant perfectly fits the data when this
is set to 0. For large values, the interpolant approaches a least
squares fit of a polynomial with the specified degree. Default is 0.
kernel : str, optional
Type of RBF. This should be one of
- 'linear' : ``-r``
- 'thin_plate_spline' : ``r**2 * log(r)``
- 'cubic' : ``r**3``
- 'quintic' : ``-r**5``
- 'multiquadric' : ``-sqrt(1 + r**2)``
- 'inverse_multiquadric' : ``1/sqrt(1 + r**2)``
- 'inverse_quadratic' : ``1/(1 + r**2)``
- 'gaussian' : ``exp(-r**2)``
Default is 'thin_plate_spline'.
epsilon : float, optional
Shape parameter that scales the input to the RBF. If `kernel` is
'linear', 'thin_plate_spline', 'cubic', or 'quintic', this defaults to
1 and can be ignored because it has the same effect as scaling the
smoothing parameter. Otherwise, this must be specified.
degree : int, optional
Degree of the added polynomial. For some RBFs the interpolant may not
be well-posed if the polynomial degree is too small. Those RBFs and
their corresponding minimum degrees are
- 'multiquadric' : 0
- 'linear' : 0
- 'thin_plate_spline' : 1
- 'cubic' : 1
- 'quintic' : 2
The default value is the minimum degree for `kernel` or 0 if there is
no minimum degree. Set this to -1 for no added polynomial.
Notes
-----
An RBF is a scalar valued function in N-dimensional space whose value at
:math:`x` can be expressed in terms of :math:`r=||x - c||`, where :math:`c`
is the center of the RBF.
An RBF interpolant for the vector of data values :math:`d`, which are from
locations :math:`y`, is a linear combination of RBFs centered at :math:`y`
plus a polynomial with a specified degree. The RBF interpolant is written
as
.. math::
f(x) = K(x, y) a + P(x) b,
where :math:`K(x, y)` is a matrix of RBFs with centers at :math:`y`
evaluated at the points :math:`x`, and :math:`P(x)` is a matrix of
monomials, which span polynomials with the specified degree, evaluated at
:math:`x`. The coefficients :math:`a` and :math:`b` are the solution to the
linear equations
.. math::
(K(y, y) + \\lambda I) a + P(y) b = d
and
.. math::
P(y)^T a = 0,
where :math:`\\lambda` is a non-negative smoothing parameter that controls
how well we want to fit the data. The data are fit exactly when the
smoothing parameter is 0.
The above system is uniquely solvable if the following requirements are
met:
- :math:`P(y)` must have full column rank. :math:`P(y)` always has full
column rank when `degree` is -1 or 0. When `degree` is 1,
:math:`P(y)` has full column rank if the data point locations are not
all collinear (N=2), coplanar (N=3), etc.
- If `kernel` is 'multiquadric', 'linear', 'thin_plate_spline',
'cubic', or 'quintic', then `degree` must not be lower than the
minimum value listed above.
- If `smoothing` is 0, then each data point location must be distinct.
When using an RBF that is not scale invariant ('multiquadric',
'inverse_multiquadric', 'inverse_quadratic', or 'gaussian'), an appropriate
shape parameter must be chosen (e.g., through cross validation). Smaller
values for the shape parameter correspond to wider RBFs. The problem can
become ill-conditioned or singular when the shape parameter is too small.
The memory required to solve for the RBF interpolation coefficients
increases quadratically with the number of data points, which can become
impractical when interpolating more than about a thousand data points.
To overcome memory limitations for large interpolation problems, the
`neighbors` argument can be specified to compute an RBF interpolant for
each evaluation point using only the nearest data points.
.. versionadded:: 1.7.0
See Also
--------
NearestNDInterpolator
LinearNDInterpolator
CloughTocher2DInterpolator
References
----------
.. [1] Fasshauer, G., 2007. Meshfree Approximation Methods with Matlab.
World Scientific Publishing Co.
.. [2] http://amadeus.math.iit.edu/~fass/603_ch3.pdf
.. [3] Wahba, G., 1990. Spline Models for Observational Data. SIAM.
.. [4] http://pages.stat.wisc.edu/~wahba/stat860public/lect/lect8/lect8.pdf
Examples
--------
Demonstrate interpolating scattered data to a grid in 2-D.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import RBFInterpolator
>>> from scipy.stats.qmc import Halton
>>> rng = np.random.default_rng()
>>> xobs = 2*Halton(2, seed=rng).random(100) - 1
>>> yobs = np.sum(xobs, axis=1)*np.exp(-6*np.sum(xobs**2, axis=1))
>>> xgrid = np.mgrid[-1:1:50j, -1:1:50j]
>>> xflat = xgrid.reshape(2, -1).T
>>> yflat = RBFInterpolator(xobs, yobs)(xflat)
>>> ygrid = yflat.reshape(50, 50)
>>> fig, ax = plt.subplots()
>>> ax.pcolormesh(*xgrid, ygrid, vmin=-0.25, vmax=0.25, shading='gouraud')
>>> p = ax.scatter(*xobs.T, c=yobs, s=50, ec='k', vmin=-0.25, vmax=0.25)
>>> fig.colorbar(p)
>>> plt.show()
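The `neighbors` option gives a memory-friendly, approximate variant of
the same fit; this sketch uses only the 20 nearest observations per
evaluation point:

>>> yflat_local = RBFInterpolator(xobs, yobs, neighbors=20)(xflat)
>>> yflat_local.shape
(2500,)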
"""
def __init__(self, y, d,
neighbors=None,
smoothing=0.0,
kernel="thin_plate_spline",
epsilon=None,
degree=None):
y = np.asarray(y, dtype=float, order="C")
if y.ndim != 2:
raise ValueError("`y` must be a 2-dimensional array.")
ny, ndim = y.shape
d_dtype = complex if np.iscomplexobj(d) else float
d = np.asarray(d, dtype=d_dtype, order="C")
if d.shape[0] != ny:
raise ValueError(
f"Expected the first axis of `d` to have length {ny}."
)
d_shape = d.shape[1:]
d = d.reshape((ny, -1))
# If `d` is complex, convert it to a float array with twice as many
# columns. Otherwise, the LHS matrix would need to be converted to
# complex and take up 2x more memory than necessary.
d = d.view(float)
if np.isscalar(smoothing):
smoothing = np.full(ny, smoothing, dtype=float)
else:
smoothing = np.asarray(smoothing, dtype=float, order="C")
if smoothing.shape != (ny,):
raise ValueError(
"Expected `smoothing` to be a scalar or have shape "
f"({ny},)."
)
kernel = kernel.lower()
if kernel not in _AVAILABLE:
raise ValueError(f"`kernel` must be one of {_AVAILABLE}.")
if epsilon is None:
if kernel in _SCALE_INVARIANT:
epsilon = 1.0
else:
raise ValueError(
"`epsilon` must be specified if `kernel` is not one of "
f"{_SCALE_INVARIANT}."
)
else:
epsilon = float(epsilon)
min_degree = _NAME_TO_MIN_DEGREE.get(kernel, -1)
if degree is None:
degree = max(min_degree, 0)
else:
degree = int(degree)
if degree < -1:
raise ValueError("`degree` must be at least -1.")
elif -1 < degree < min_degree:
warnings.warn(
f"`degree` should not be below {min_degree} except -1 "
f"when `kernel` is '{kernel}'."
f"The interpolant may not be uniquely "
f"solvable, and the smoothing parameter may have an "
f"unintuitive effect.",
UserWarning, stacklevel=2
)
if neighbors is None:
nobs = ny
else:
# Make sure the number of nearest neighbors used for interpolation
# does not exceed the number of observations.
neighbors = int(min(neighbors, ny))
nobs = neighbors
powers = _monomial_powers(ndim, degree)
# The polynomial matrix must have full column rank in order for the
# interpolant to be well-posed, which is not possible if there are
# fewer observations than monomials.
if powers.shape[0] > nobs:
raise ValueError(
f"At least {powers.shape[0]} data points are required when "
f"`degree` is {degree} and the number of dimensions is {ndim}."
)
if neighbors is None:
shift, scale, coeffs = _build_and_solve_system(
y, d, smoothing, kernel, epsilon, powers
)
# Make these attributes private since they do not always exist.
self._shift = shift
self._scale = scale
self._coeffs = coeffs
else:
self._tree = KDTree(y)
self.y = y
self.d = d
self.d_shape = d_shape
self.d_dtype = d_dtype
self.neighbors = neighbors
self.smoothing = smoothing
self.kernel = kernel
self.epsilon = epsilon
self.powers = powers
def _chunk_evaluator(
self,
x,
y,
shift,
scale,
coeffs,
memory_budget=1000000
):
"""
Evaluate the interpolation while controlling memory consumption.
We chunk the input if we need more memory than specified.
Parameters
----------
x : (Q, N) float ndarray
Array of points at which to evaluate.
y : (P, N) float ndarray
Array of points at which the function values are known.
shift : (N,) float ndarray
Domain shift used to create the polynomial matrix.
scale : (N,) float ndarray
Domain scaling used to create the polynomial matrix.
coeffs : (P + R, S) float ndarray
Coefficients in front of the basis functions.
memory_budget : int
Total amount of memory (in units of sizeof(float)) we wish
to devote for storing the array of coefficients for
interpolated points. If we need more memory than that, we
chunk the input.
Returns
-------
(Q, S) float ndarray
Interpolated array
"""
nx, ndim = x.shape
if self.neighbors is None:
nnei = len(y)
else:
nnei = self.neighbors
# in each chunk we consume the same space we already occupy
chunksize = memory_budget // (self.powers.shape[0] + nnei) + 1
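# e.g. with the default budget of 10**6 floats, 100 neighbors and
# 10 monomials, about 10**6 // 110 + 1 == 9091 points are evaluated per pass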
if chunksize <= nx:
out = np.empty((nx, self.d.shape[1]), dtype=float)
for i in range(0, nx, chunksize):
vec = _build_evaluation_coefficients(
x[i:i + chunksize, :],
y,
self.kernel,
self.epsilon,
self.powers,
shift,
scale)
out[i:i + chunksize, :] = np.dot(vec, coeffs)
else:
vec = _build_evaluation_coefficients(
x,
y,
self.kernel,
self.epsilon,
self.powers,
shift,
scale)
out = np.dot(vec, coeffs)
return out
def __call__(self, x):
"""Evaluate the interpolant at `x`.
Parameters
----------
x : (Q, N) array_like
Evaluation point coordinates.
Returns
-------
(Q, ...) ndarray
Values of the interpolant at `x`.
"""
x = np.asarray(x, dtype=float, order="C")
if x.ndim != 2:
raise ValueError("`x` must be a 2-dimensional array.")
nx, ndim = x.shape
if ndim != self.y.shape[1]:
raise ValueError("Expected the second axis of `x` to have length "
f"{self.y.shape[1]}.")
# Our memory budget for storing RBF coefficients is
# based on how many floats in memory we already occupy
# If this number is below 1e6 we just use 1e6
# This memory budget is used to decide how we chunk
# the inputs
memory_budget = max(x.size + self.y.size + self.d.size, 1000000)
if self.neighbors is None:
out = self._chunk_evaluator(
x,
self.y,
self._shift,
self._scale,
self._coeffs,
memory_budget=memory_budget)
else:
# Get the indices of the k nearest observation points to each
# evaluation point.
_, yindices = self._tree.query(x, self.neighbors)
if self.neighbors == 1:
# `KDTree` squeezes the output when neighbors=1.
yindices = yindices[:, None]
# Multiple evaluation points may have the same neighborhood of
# observation points. Make the neighborhoods unique so that we only
# compute the interpolation coefficients once for each
# neighborhood.
yindices = np.sort(yindices, axis=1)
yindices, inv = np.unique(yindices, return_inverse=True, axis=0)
inv = np.reshape(inv, (-1,)) # flatten, we need 1-D indices
# `inv` tells us which neighborhood will be used by each evaluation
# point. Now we find which evaluation points will be using each
# neighborhood.
xindices = [[] for _ in range(len(yindices))]
for i, j in enumerate(inv):
xindices[j].append(i)
out = np.empty((nx, self.d.shape[1]), dtype=float)
for xidx, yidx in zip(xindices, yindices):
# `yidx` are the indices of the observations in this
# neighborhood. `xidx` are the indices of the evaluation points
# that are using this neighborhood.
xnbr = x[xidx]
ynbr = self.y[yidx]
dnbr = self.d[yidx]
snbr = self.smoothing[yidx]
shift, scale, coeffs = _build_and_solve_system(
ynbr,
dnbr,
snbr,
self.kernel,
self.epsilon,
self.powers,
)
out[xidx] = self._chunk_evaluator(
xnbr,
ynbr,
shift,
scale,
coeffs,
memory_budget=memory_budget)
out = out.view(self.d_dtype)
out = out.reshape((nx, ) + self.d_shape)
return out

@@ -0,0 +1,764 @@
__all__ = ['RegularGridInterpolator', 'interpn']
import itertools
import numpy as np
import scipy.sparse.linalg as ssl
from ._interpnd import _ndim_coords_from_arrays
from ._cubic import PchipInterpolator
from ._rgi_cython import evaluate_linear_2d, find_indices
from ._bsplines import make_interp_spline
from ._fitpack2 import RectBivariateSpline
from ._ndbspline import make_ndbspl
def _check_points(points):
descending_dimensions = []
grid = []
for i, p in enumerate(points):
# early make points float
# see https://github.com/scipy/scipy/pull/17230
p = np.asarray(p, dtype=float)
if not np.all(p[1:] > p[:-1]):
if np.all(p[1:] < p[:-1]):
# input is descending, so make it ascending
descending_dimensions.append(i)
p = np.flip(p)
else:
raise ValueError(
f"The points in dimension {i} must be strictly ascending or "
f"descending"
)
# see https://github.com/scipy/scipy/issues/17716
p = np.ascontiguousarray(p)
grid.append(p)
return tuple(grid), tuple(descending_dimensions)
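# For illustration: _check_points(([3, 2, 1],)) returns
# ((array([1., 2., 3.]),), (0,)) -- the descending axis is flipped and its
# index recorded so that `values` can be flipped to match.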
def _check_dimensionality(points, values):
if len(points) > values.ndim:
raise ValueError(
f"There are {len(points)} point arrays, but values has "
f"{values.ndim} dimensions"
)
for i, p in enumerate(points):
if not np.asarray(p).ndim == 1:
raise ValueError(f"The points in dimension {i} must be 1-dimensional")
if not values.shape[i] == len(p):
raise ValueError(
f"There are {len(p)} points and {values.shape[i]} values in "
f"dimension {i}"
)
class RegularGridInterpolator:
"""Interpolator of specified order on a rectilinear grid in N ≥ 1 dimensions.
The data must be defined on a rectilinear grid; that is, a rectangular
grid with even or uneven spacing. Linear, nearest-neighbor, and spline
interpolations are supported. After setting up the interpolator object,
the interpolation method may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions. The points in
each dimension (i.e. every element of the points tuple) must be
strictly ascending or descending.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions. Complex data is
accepted.
method : str, optional
The method of interpolation to perform. Supported are "linear",
"nearest", "slinear", "cubic", "quintic" and "pchip". This
parameter will become the default for the object's ``__call__``
method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
Default is True.
fill_value : float or None, optional
The value to use for points outside of the interpolation domain.
If None, values outside the domain are extrapolated.
Default is ``np.nan``.
solver : callable, optional
Only used for methods "slinear", "cubic" and "quintic".
Sparse linear algebra solver for construction of the NdBSpline instance.
Default is the iterative solver `scipy.sparse.linalg.gcrotmk`.
.. versionadded:: 1.13
solver_args : dict, optional
Additional arguments to pass to `solver`, if any.
.. versionadded:: 1.13
Methods
-------
__call__
Attributes
----------
grid : tuple of ndarrays
The points defining the regular grid in n dimensions.
This tuple defines the full grid via
``np.meshgrid(*grid, indexing='ij')``
values : ndarray
Data values at the grid.
method : str
Interpolation method.
fill_value : float or ``None``
Use this value for out-of-bounds arguments to `__call__`.
bounds_error : bool
If ``True``, out-of-bounds argument raise a ``ValueError``.
Notes
-----
Contrary to `LinearNDInterpolator` and `NearestNDInterpolator`, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
In other words, this class assumes that the data is defined on a
*rectilinear* grid.
.. versionadded:: 0.14
The 'slinear'(k=1), 'cubic'(k=3), and 'quintic'(k=5) methods are
tensor-product spline interpolators, where `k` is the spline degree.
If any dimension has fewer points than `k` + 1, an error will be raised.
.. versionadded:: 1.9
If the input data is such that dimensions have incommensurate
units and differ by many orders of magnitude, the interpolant may have
numerical artifacts. Consider rescaling the data before interpolating.
**Choosing a solver for spline methods**
Spline methods, "slinear", "cubic" and "quintic" involve solving a
large sparse linear system at instantiation time. Depending on data,
the default solver may or may not be adequate. When it is not, you may
need to experiment with an optional `solver` argument, where you may
choose between the direct solver (`scipy.sparse.linalg.spsolve`) or
iterative solvers from `scipy.sparse.linalg`. You may need to supply
additional parameters via the optional `solver_args` parameter (for instance,
you may supply the starting value or target tolerance). See the
`scipy.sparse.linalg` documentation for the full list of available options.
Alternatively, you may instead use the legacy methods, "slinear_legacy",
"cubic_legacy" and "quintic_legacy". These methods allow faster construction
but evaluations will be much slower.
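As a minimal sketch (with illustrative grid values only), the direct
solver can be passed at construction time:

>>> import numpy as np
>>> import scipy.sparse.linalg as ssl
>>> from scipy.interpolate import RegularGridInterpolator
>>> pts = (np.linspace(0, 1, 10), np.linspace(0, 1, 10))
>>> vals = np.ones((10, 10))
>>> rgi = RegularGridInterpolator(pts, vals, method="cubic",
...                               solver=ssl.spsolve)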
**Rounding rule at half points with `nearest` method**
The rounding rule with the `nearest` method at half points is rounding *down*.
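For example:

>>> import numpy as np
>>> from scipy.interpolate import RegularGridInterpolator
>>> nearest = RegularGridInterpolator(([0.0, 1.0],), np.array([10., 20.]),
...                                   method='nearest')
>>> nearest([0.5])  # the half point rounds down
array([10.])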
Examples
--------
**Evaluate a function on the points of a 3-D grid**
As a first example, we evaluate a simple example function on the points of
a 3-D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> import numpy as np
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> xg, yg ,zg = np.meshgrid(x, y, z, indexing='ij', sparse=True)
>>> data = f(xg, yg, zg)
``data`` is now a 3-D array with ``data[i, j, k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> interp = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3],
... [3.3, 5.2, 7.1]])
>>> interp(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
>>> f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)
(125.54200000000002, 145.894)
**Interpolate and extrapolate a 2D dataset**
As a second example, we interpolate and extrapolate a 2D data set:
>>> x, y = np.array([-2, 0, 4]), np.array([-2, 0, 2, 5])
>>> def ff(x, y):
... return x**2 + y**2
>>> xg, yg = np.meshgrid(x, y, indexing='ij')
>>> data = ff(xg, yg)
>>> interp = RegularGridInterpolator((x, y), data,
... bounds_error=False, fill_value=None)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(projection='3d')
>>> ax.scatter(xg.ravel(), yg.ravel(), data.ravel(),
... s=60, c='k', label='data')
Evaluate and plot the interpolator on a finer grid
>>> xx = np.linspace(-4, 9, 31)
>>> yy = np.linspace(-4, 9, 31)
>>> X, Y = np.meshgrid(xx, yy, indexing='ij')
>>> # interpolator
>>> ax.plot_wireframe(X, Y, interp((X, Y)), rstride=3, cstride=3,
... alpha=0.4, color='m', label='linear interp')
>>> # ground truth
>>> ax.plot_wireframe(X, Y, ff(X, Y), rstride=3, cstride=3,
... alpha=0.4, label='ground truth')
>>> plt.legend()
>>> plt.show()
Other examples are given
:ref:`in the tutorial <tutorial-interpolate_regular_grid_interpolator>`.
See Also
--------
NearestNDInterpolator : Nearest neighbor interpolator on *unstructured*
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolator on *unstructured* data
in N dimensions
interpn : a convenience function which wraps `RegularGridInterpolator`
scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
(suitable for e.g., N-D image resampling)
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Wikipedia, "Trilinear interpolation",
https://en.wikipedia.org/wiki/Trilinear_interpolation
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
:doi:`10.1090/S0025-5718-1988-0917826-0`
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
_SPLINE_DEGREE_MAP = {"slinear": 1, "cubic": 3, "quintic": 5, 'pchip': 3,
"slinear_legacy": 1, "cubic_legacy": 3, "quintic_legacy": 5,}
_SPLINE_METHODS_recursive = {"slinear_legacy", "cubic_legacy",
"quintic_legacy", "pchip"}
_SPLINE_METHODS_ndbspl = {"slinear", "cubic", "quintic"}
_SPLINE_METHODS = list(_SPLINE_DEGREE_MAP.keys())
_ALL_METHODS = ["linear", "nearest"] + _SPLINE_METHODS
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan, *, solver=None, solver_args=None):
if method not in self._ALL_METHODS:
raise ValueError(f"Method '{method}' is not defined")
elif method in self._SPLINE_METHODS:
self._validate_grid_dimensions(points, method)
self.method = method
self._spline = None
self.bounds_error = bounds_error
self.grid, self._descending_dimensions = _check_points(points)
self.values = self._check_values(values)
self._check_dimensionality(self.grid, self.values)
self.fill_value = self._check_fill_value(self.values, fill_value)
if self._descending_dimensions:
self.values = np.flip(values, axis=self._descending_dimensions)
if self.method == "pchip" and np.iscomplexobj(self.values):
msg = ("`PchipInterpolator` only works with real values. If you are trying "
"to use the real components of the passed array, use `np.real` on "
"the array before passing to `RegularGridInterpolator`.")
raise ValueError(msg)
if method in self._SPLINE_METHODS_ndbspl:
if solver_args is None:
solver_args = {}
self._spline = self._construct_spline(method, solver, **solver_args)
else:
if solver is not None or solver_args:
raise ValueError(
f"{method =} does not accept the 'solver' argument. Got "
f" {solver = } and with arguments {solver_args}."
)
def _construct_spline(self, method, solver=None, **solver_args):
if solver is None:
solver = ssl.gcrotmk
spl = make_ndbspl(
self.grid, self.values, self._SPLINE_DEGREE_MAP[method],
solver=solver, **solver_args
)
return spl
def _check_dimensionality(self, grid, values):
_check_dimensionality(grid, values)
def _check_points(self, points):
return _check_points(points)
def _check_values(self, values):
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
return values
def _check_fill_value(self, values, fill_value):
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
return fill_value
def __call__(self, xi, method=None, *, nu=None):
"""
Interpolation at coordinates.
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to evaluate the interpolator at.
method : str, optional
The method of interpolation to perform. Supported are "linear",
"nearest", "slinear", "cubic", "quintic" and "pchip". Default is
the method chosen when the interpolator was created.
nu : sequence of ints, length ndim, optional
If not None, the orders of the derivatives to evaluate.
Each entry must be non-negative.
Only allowed for methods "slinear", "cubic" and "quintic".
.. versionadded:: 1.13
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at `xi`. See notes for behaviour when
``xi.ndim == 1``.
Notes
-----
In the case that ``xi.ndim == 1`` a new axis is inserted into
the 0 position of the returned array, values_x, so its shape is
instead ``(1,) + values.shape[ndim:]``.
Examples
--------
Here we define a nearest-neighbor interpolator of a simple function
>>> import numpy as np
>>> x, y = np.array([0, 1, 2]), np.array([1, 3, 7])
>>> def f(x, y):
... return x**2 + y**2
>>> data = f(*np.meshgrid(x, y, indexing='ij', sparse=True))
>>> from scipy.interpolate import RegularGridInterpolator
>>> interp = RegularGridInterpolator((x, y), data, method='nearest')
By construction, the interpolator uses the nearest-neighbor
interpolation
>>> interp([[1.5, 1.3], [0.3, 4.5]])
array([2., 9.])
We can however evaluate the linear interpolant by overriding the
`method` parameter
>>> interp([[1.5, 1.3], [0.3, 4.5]], method='linear')
array([ 4.7, 24.3])
"""
_spline = self._spline
method = self.method if method is None else method
is_method_changed = self.method != method
if method not in self._ALL_METHODS:
raise ValueError(f"Method '{method}' is not defined")
if is_method_changed and method in self._SPLINE_METHODS_ndbspl:
_spline = self._construct_spline(method)
if nu is not None and method not in self._SPLINE_METHODS_ndbspl:
raise ValueError(
f"Can only compute derivatives for methods "
f"{self._SPLINE_METHODS_ndbspl}, got {method =}."
)
xi, xi_shape, ndim, nans, out_of_bounds = self._prepare_xi(xi)
if method == "linear":
indices, norm_distances = self._find_indices(xi.T)
if (ndim == 2 and hasattr(self.values, 'dtype') and
self.values.ndim == 2 and self.values.flags.writeable and
self.values.dtype in (np.float64, np.complex128) and
self.values.dtype.byteorder == '='):
# until cython supports const fused types, the fast path
# cannot support non-writeable values
# a fast path
out = np.empty(indices.shape[1], dtype=self.values.dtype)
result = evaluate_linear_2d(self.values,
indices,
norm_distances,
self.grid,
out)
else:
result = self._evaluate_linear(indices, norm_distances)
elif method == "nearest":
indices, norm_distances = self._find_indices(xi.T)
result = self._evaluate_nearest(indices, norm_distances)
elif method in self._SPLINE_METHODS:
if is_method_changed:
self._validate_grid_dimensions(self.grid, method)
if method in self._SPLINE_METHODS_recursive:
result = self._evaluate_spline(xi, method)
else:
result = _spline(xi, nu=nu)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
# f(nan) = nan, if any
if np.any(nans):
result[nans] = np.nan
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _prepare_xi(self, xi):
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
f"{xi.shape[-1]} but this "
f"RegularGridInterpolator has dimension {ndim}")
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
xi = np.asarray(xi, dtype=float)
# find nans in input
nans = np.any(np.isnan(xi), axis=-1)
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError(
f"One of the requested xi is out of bounds in dimension {i}"
)
out_of_bounds = None
else:
out_of_bounds = self._find_out_of_bounds(xi.T)
return xi, xi_shape, ndim, nans, out_of_bounds
def _evaluate_linear(self, indices, norm_distances):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# Compute shifting up front before zipping everything together
shift_norm_distances = [1 - yi for yi in norm_distances]
shift_indices = [i + 1 for i in indices]
# The formula for linear interpolation in 2d takes the form:
# values = self.values[(i0, i1)] * (1 - y0) * (1 - y1) + \
# self.values[(i0, i1 + 1)] * (1 - y0) * y1 + \
# self.values[(i0 + 1, i1)] * y0 * (1 - y1) + \
# self.values[(i0 + 1, i1 + 1)] * y0 * y1
# We pair i with 1 - yi (zipped1) and i + 1 with yi (zipped2)
zipped1 = zip(indices, shift_norm_distances)
zipped2 = zip(shift_indices, norm_distances)
# Take all products of zipped1 and zipped2 and iterate over them
# to get the terms in the above formula. This corresponds to iterating
# over the vertices of a hypercube.
hypercube = itertools.product(*zip(zipped1, zipped2))
value = np.array([0.])
for h in hypercube:
edge_indices, weights = zip(*h)
weight = np.array([1.])
for w in weights:
weight = weight * w
term = np.asarray(self.values[edge_indices]) * weight[vslice]
value = value + term # cannot use += because broadcasting
return value
def _evaluate_nearest(self, indices, norm_distances):
idx_res = [np.where(yi <= .5, i, i + 1)
for i, yi in zip(indices, norm_distances)]
return self.values[tuple(idx_res)]
def _validate_grid_dimensions(self, points, method):
k = self._SPLINE_DEGREE_MAP[method]
for i, point in enumerate(points):
ndim = len(np.atleast_1d(point))
if ndim <= k:
raise ValueError(f"There are {ndim} points in dimension {i},"
f" but method {method} requires at least "
f" {k+1} points per dimension.")
def _evaluate_spline(self, xi, method):
# ensure xi is 2D list of points to evaluate (`m` is the number of
# points and `n` is the number of interpolation dimensions,
# ``n == len(self.grid)``.)
if xi.ndim == 1:
xi = xi.reshape((1, xi.size))
m, n = xi.shape
# Reorder the axes: n-dimensional process iterates over the
# interpolation axes from the last axis downwards: E.g. for a 4D grid
# the order of axes is 3, 2, 1, 0. Each 1D interpolation works along
# the 0th axis of its argument array (for 1D routine it's its ``y``
# array). Thus permute the interpolation axes of `values` *and keep
# trailing dimensions trailing*.
axes = tuple(range(self.values.ndim))
axx = axes[:n][::-1] + axes[n:]
values = self.values.transpose(axx)
if method == 'pchip':
_eval_func = self._do_pchip
else:
_eval_func = self._do_spline_fit
k = self._SPLINE_DEGREE_MAP[method]
# Non-stationary procedure: difficult to vectorize this part entirely
# into numpy-level operations. Unfortunately this requires explicit
# looping over each point in xi.
# We can at least vectorize the first pass across all points in the
# last variable of xi.
last_dim = n - 1
first_values = _eval_func(self.grid[last_dim],
values,
xi[:, last_dim],
k)
# the rest of the dimensions have to be on a per point-in-xi basis
shape = (m, *self.values.shape[n:])
result = np.empty(shape, dtype=self.values.dtype)
for j in range(m):
# Main process: Apply 1D interpolate in each dimension
# sequentially, starting with the last dimension.
# These are then "folded" into the next dimension in-place.
folded_values = first_values[j, ...]
for i in range(last_dim-1, -1, -1):
# Interpolate for each 1D from the last dimensions.
# This collapses each 1D sequence into a scalar.
folded_values = _eval_func(self.grid[i],
folded_values,
xi[j, i],
k)
result[j, ...] = folded_values
return result
@staticmethod
def _do_spline_fit(x, y, pt, k):
local_interp = make_interp_spline(x, y, k=k, axis=0)
values = local_interp(pt)
return values
@staticmethod
def _do_pchip(x, y, pt, k):
local_interp = PchipInterpolator(x, y, axis=0)
values = local_interp(pt)
return values
def _find_indices(self, xi):
return find_indices(self.grid, xi)
def _find_out_of_bounds(self, xi):
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular or rectilinear grids.
Strictly speaking, not all regular grids are supported - this function
works on *rectilinear* grids, that is, rectangular grids with even or
uneven spacing.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions. The points in
each dimension (i.e. every elements of the points tuple) must be
strictly ascending or descending.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions. Complex data is
accepted.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at.
method : str, optional
The method of interpolation to perform. Supported are "linear",
"nearest", "slinear", "cubic", "quintic", "pchip", and "splinef2d".
"splinef2d" is only supported for 2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at `xi`. See notes for behaviour when
``xi.ndim == 1``.
See Also
--------
NearestNDInterpolator : Nearest neighbor interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : interpolation on a regular or rectilinear grid
in arbitrary dimensions (`interpn` wraps this
class).
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
(suitable for e.g., N-D image resampling)
Notes
-----
.. versionadded:: 0.14
In the case that ``xi.ndim == 1`` a new axis is inserted into
the 0 position of the returned array, values_x, so its shape is
instead ``(1,) + values.shape[ndim:]``.
If the input data is such that input dimensions have incommensurate
units and differ by many orders of magnitude, the interpolant may have
numerical artifacts. Consider rescaling the data before interpolation.
Examples
--------
Evaluate a simple example function on the points of a regular 3-D grid:
>>> import numpy as np
>>> from scipy.interpolate import interpn
>>> def value_func_3d(x, y, z):
... return 2 * x + 3 * y - z
>>> x = np.linspace(0, 4, 5)
>>> y = np.linspace(0, 5, 6)
>>> z = np.linspace(0, 6, 7)
>>> points = (x, y, z)
>>> values = value_func_3d(*np.meshgrid(*points, indexing='ij'))
Evaluate the interpolating function at a point
>>> point = np.array([2.21, 3.12, 1.15])
>>> print(interpn(points, values, point))
[12.63]
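With ``method='nearest'``, the interpolant snaps to the closest grid
node instead, here ``(2, 3, 1)``:

>>> print(interpn(points, values, point, method='nearest'))
[12.]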
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "cubic", "quintic", "pchip",
"splinef2d", "slinear",
"slinear_legacy", "cubic_legacy", "quintic_legacy"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', 'slinear', 'cubic', 'quintic', 'pchip', "
f"and 'splinef2d'. You provided {method}.")
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method splinef2d can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method splinef2d does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError(
f"There are {len(points)} point arrays, but values has {ndim} dimensions"
)
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method splinef2d can only be used for "
"scalar data with one point per coordinate")
grid, descending_dimensions = _check_points(points)
_check_dimensionality(grid, values)
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError(
f"The requested sample points xi have dimension {xi.shape[-1]}, "
f"but this RegularGridInterpolator has dimension {len(grid)}"
)
if bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError(
f"One of the requested xi is out of bounds in dimension {i}"
)
# perform interpolation
if method in RegularGridInterpolator._ALL_METHODS:
interp = RegularGridInterpolator(points, values, method=method,
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
else:
raise ValueError(f"unknown {method = }")

@@ -0,0 +1,24 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = [ # noqa: F822
'spalde',
'splder',
'splev',
'splint',
'sproot',
]
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="interpolate", module="dfitpack",
private_modules=["_dfitpack"], all=__all__,
attribute=name)

@@ -0,0 +1,31 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = [ # noqa: F822
'BSpline',
'bisplev',
'bisplrep',
'insert',
'spalde',
'splantider',
'splder',
'splev',
'splint',
'splprep',
'splrep',
'sproot',
]
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="interpolate", module="fitpack",
private_modules=["_fitpack_py"], all=__all__,
attribute=name)

@@ -0,0 +1,29 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = [ # noqa: F822
'BivariateSpline',
'InterpolatedUnivariateSpline',
'LSQBivariateSpline',
'LSQSphereBivariateSpline',
'LSQUnivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline',
'SmoothBivariateSpline',
'SmoothSphereBivariateSpline',
'UnivariateSpline',
]
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="interpolate", module="fitpack2",
private_modules=["_fitpack2"], all=__all__,
attribute=name)

@@ -0,0 +1,24 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = [ # noqa: F822
'CloughTocher2DInterpolator',
'GradientEstimationWarning',
'LinearNDInterpolator',
'NDInterpolatorBase',
'estimate_gradients_2d_global',
]
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="interpolate", module="interpnd",
private_modules=["_interpnd"], all=__all__,
attribute=name, dep_version="1.17.0")

@@ -0,0 +1,30 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = [ # noqa: F822
'BPoly',
'BSpline',
'NdPPoly',
'PPoly',
'RectBivariateSpline',
'RegularGridInterpolator',
'interp1d',
'interp2d',
'interpn',
'lagrange',
'make_interp_spline',
]
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="interpolate", module="interpolate",
private_modules=["_interpolate", "fitpack2", "_rgi"],
all=__all__, attribute=name)

@@ -0,0 +1,23 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = [ # noqa: F822
'CloughTocher2DInterpolator',
'LinearNDInterpolator',
'NearestNDInterpolator',
'griddata',
]
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="interpolate", module="ndgriddata",
private_modules=["_ndgriddata"], all=__all__,
attribute=name)

@@ -0,0 +1,24 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = [ # noqa: F822
'BarycentricInterpolator',
'KroghInterpolator',
'approximate_taylor_polynomial',
'barycentric_interpolate',
'krogh_interpolate',
]
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="interpolate", module="polyint",
private_modules=["_polyint"], all=__all__,
attribute=name)

@@ -0,0 +1,18 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.interpolate` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = ["Rbf"] # noqa: F822
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="interpolate", module="rbf",
private_modules=["_rbf"], all=__all__,
attribute=name)
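# Usage note: after this shim, ``from scipy.interpolate.rbf import Rbf`` still
# works, but it emits a deprecation warning and forwards the lookup to the
# private `scipy.interpolate._rbf` module.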

@@ -0,0 +1,368 @@
# Copyright (c) 2017, The Chancellor, Masters and Scholars of the University
# of Oxford, and the Chebfun Developers. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of Oxford nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from math import factorial
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_array_less
import pytest
import scipy
from scipy.interpolate import AAA, FloaterHormannInterpolator, BarycentricInterpolator
TOL = 1e4 * np.finfo(np.float64).eps
UNIT_INTERVAL = np.linspace(-1, 1, num=1000)
PTS = np.logspace(-15, 0, base=10, num=500)
PTS = np.concatenate([-PTS[::-1], [0], PTS])
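# PTS places evaluation points on a symmetric log scale around 0 (plus the
# origin itself), so approximants are probed densely near the interval center.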
@pytest.mark.parametrize("method", [AAA, FloaterHormannInterpolator])
@pytest.mark.parametrize("dtype", [np.float32, np.float64, np.complex64, np.complex128])
def test_dtype_preservation(method, dtype):
rtol = np.finfo(dtype).eps ** 0.75 * 100
if method is FloaterHormannInterpolator:
rtol *= 100
rng = np.random.default_rng(59846294526092468)
z = np.linspace(-1, 1, dtype=dtype)
r = method(z, np.sin(z))
z2 = rng.uniform(-1, 1, size=100).astype(dtype)
assert_allclose(r(z2), np.sin(z2), rtol=rtol)
assert r(z2).dtype == dtype
if method is AAA:
assert r.support_points.dtype == dtype
assert r.support_values.dtype == dtype
assert r.errors.dtype == z.real.dtype
assert r.weights.dtype == dtype
assert r.poles().dtype == np.result_type(dtype, 1j)
assert r.residues().dtype == np.result_type(dtype, 1j)
assert r.roots().dtype == np.result_type(dtype, 1j)
@pytest.mark.parametrize("method", [AAA, FloaterHormannInterpolator])
@pytest.mark.parametrize("dtype", [np.int16, np.int32, np.int64])
def test_integer_promotion(method, dtype):
z = np.arange(10, dtype=dtype)
r = method(z, z)
assert r.weights.dtype == np.result_type(dtype, 1.0)
if method is AAA:
assert r.support_points.dtype == np.result_type(dtype, 1.0)
assert r.support_values.dtype == np.result_type(dtype, 1.0)
assert r.errors.dtype == np.result_type(dtype, 1.0)
assert r.poles().dtype == np.result_type(dtype, 1j)
assert r.residues().dtype == np.result_type(dtype, 1j)
assert r.roots().dtype == np.result_type(dtype, 1j)
assert r(z).dtype == np.result_type(dtype, 1.0)
class TestAAA:
def test_input_validation(self):
with pytest.raises(ValueError, match="same size"):
AAA([0], [1, 1])
with pytest.raises(ValueError, match="1-D"):
AAA([[0], [0]], [[1], [1]])
with pytest.raises(ValueError, match="finite"):
AAA([np.inf], [1])
with pytest.raises(TypeError):
AAA([1], [1], max_terms=1.0)
with pytest.raises(ValueError, match="greater"):
AAA([1], [1], max_terms=-1)
@pytest.mark.thread_unsafe
def test_convergence_error(self):
with pytest.warns(RuntimeWarning, match="AAA failed"):
AAA(UNIT_INTERVAL, np.exp(UNIT_INTERVAL), max_terms=1)
# The following tests are based on:
# https://github.com/chebfun/chebfun/blob/master/tests/chebfun/test_aaa.m
def test_exp(self):
f = np.exp(UNIT_INTERVAL)
r = AAA(UNIT_INTERVAL, f)
assert_allclose(r(UNIT_INTERVAL), f, atol=TOL)
assert_equal(r(np.nan), np.nan)
assert np.isfinite(r(np.inf))
m1 = r.support_points.size
r = AAA(UNIT_INTERVAL, f, rtol=1e-3)
assert r.support_points.size < m1
def test_tan(self):
f = np.tan(np.pi * UNIT_INTERVAL)
r = AAA(UNIT_INTERVAL, f)
assert_allclose(r(UNIT_INTERVAL), f, atol=10 * TOL, rtol=1.4e-7)
assert_allclose(np.min(np.abs(r.roots())), 0, atol=3e-10)
assert_allclose(np.min(np.abs(r.poles() - 0.5)), 0, atol=TOL)
# Test for spurious poles (poles with tiny residue are likely spurious)
assert np.min(np.abs(r.residues())) > 1e-13
def test_short_cases(self):
# Computed using Chebfun:
# >> format long
# >> [r, pol, res, zer, zj, fj, wj, errvec] = aaa([1 2], [0 1])
z = np.array([0, 1])
f = np.array([1, 2])
r = AAA(z, f, rtol=1e-13)
assert_allclose(r(z), f, atol=TOL)
assert_allclose(r.poles(), 0.5)
assert_allclose(r.residues(), 0.25)
assert_allclose(r.roots(), 1/3)
assert_equal(r.support_points, z)
assert_equal(r.support_values, f)
assert_allclose(r.weights, [0.707106781186547, 0.707106781186547])
assert_equal(r.errors, [1, 0])
# >> format long
# >> [r, pol, res, zer, zj, fj, wj, errvec] = aaa([1 0 0], [0 1 2])
z = np.array([0, 1, 2])
f = np.array([1, 0, 0])
r = AAA(z, f, rtol=1e-13)
assert_allclose(r(z), f, atol=TOL)
assert_allclose(np.sort(r.poles()),
np.sort([1.577350269189626, 0.422649730810374]))
assert_allclose(np.sort(r.residues()),
np.sort([-0.070441621801729, -0.262891711531604]))
assert_allclose(np.sort(r.roots()), np.sort([2, 1]))
assert_equal(r.support_points, z)
assert_equal(r.support_values, f)
assert_allclose(r.weights, [0.577350269189626, 0.577350269189626,
0.577350269189626])
assert_equal(r.errors, [1, 1, 0])
def test_scale_invariance(self):
z = np.linspace(0.3, 1.5)
f = np.exp(z) / (1 + 1j)
r1 = AAA(z, f)
r2 = AAA(z, (2**311 * f).astype(np.complex128))
r3 = AAA(z, (2**-311 * f).astype(np.complex128))
assert_equal(r1(0.2j), 2**-311 * r2(0.2j))
assert_equal(r1(1.4), 2**311 * r3(1.4))
def test_log_func(self):
rng = np.random.default_rng(1749382759832758297)
z = rng.standard_normal(10000) + 3j * rng.standard_normal(10000)
def f(z):
return np.log(5 - z) / (1 + z**2)
r = AAA(z, f(z))
assert_allclose(r(0), f(0), atol=TOL)
def test_infinite_data(self):
z = np.linspace(-1, 1)
r = AAA(z, scipy.special.gamma(z))
assert_allclose(r(0.63), scipy.special.gamma(0.63), atol=1e-15)
def test_nan(self):
x = np.linspace(0, 20)
with np.errstate(invalid="ignore"):
f = np.sin(x) / x
r = AAA(x, f)
assert_allclose(r(2), np.sin(2) / 2, atol=1e-15)
def test_residues(self):
x = np.linspace(-1.337, 2, num=537)
r = AAA(x, np.exp(x) / x)
ii = np.flatnonzero(np.abs(r.poles()) < 1e-8)
assert_allclose(r.residues()[ii], 1, atol=1e-15)
r = AAA(x, (1 + 1j) * scipy.special.gamma(x))
ii = np.flatnonzero(abs(r.poles() - (-1)) < 1e-8)
assert_allclose(r.residues()[ii], -1 - 1j, atol=1e-15)
# The following tests are based on:
# https://github.com/complexvariables/RationalFunctionApproximation.jl/blob/main/test/interval.jl
@pytest.mark.parametrize("func,atol,rtol",
[(lambda x: np.abs(x + 0.5 + 0.01j), 5e-13, 1e-7),
(lambda x: np.sin(1/(1.05 - x)), 2e-13, 1e-7),
(lambda x: np.exp(-1/(x**2)), 3.5e-12, 0),
(lambda x: np.exp(-100*x**2), 2e-12, 0),
(lambda x: np.exp(-10/(1.2 - x)), 1e-14, 0),
(lambda x: 1/(1+np.exp(100*(x + 0.5))), 2e-13, 1e-7),
(lambda x: np.abs(x - 0.95), 1e-6, 1e-7)])
def test_basic_functions(self, func, atol, rtol):
with np.errstate(divide="ignore"):
f = func(PTS)
assert_allclose(AAA(UNIT_INTERVAL, func(UNIT_INTERVAL))(PTS),
f, atol=atol, rtol=rtol)
def test_poles_zeros_residues(self):
def f(z):
return (z+1) * (z+2) / ((z+3) * (z+4))
r = AAA(UNIT_INTERVAL, f(UNIT_INTERVAL))
assert_allclose(np.sum(r.poles() + r.roots()), -10, atol=1e-12)
def f(z):
return 2/(3 + z) + 5/(z - 2j)
r = AAA(UNIT_INTERVAL, f(UNIT_INTERVAL))
assert_allclose(r.residues().prod(), 10, atol=1e-8)
r = AAA(UNIT_INTERVAL, np.sin(10*np.pi*UNIT_INTERVAL))
assert_allclose(np.sort(np.abs(r.roots()))[18], 0.9, atol=1e-12)
def f(z):
return (z - (3 + 3j))/(z + 2)
r = AAA(UNIT_INTERVAL, f(UNIT_INTERVAL))
assert_allclose(r.poles()[0]*r.roots()[0], -6-6j, atol=1e-12)
@pytest.mark.parametrize("func",
[lambda z: np.zeros_like(z), lambda z: z, lambda z: 1j*z,
lambda z: z**2 + z, lambda z: z**3 + z,
lambda z: 1/(1.1 + z), lambda z: 1/(1 + 1j*z),
lambda z: 1/(3 + z + z**2), lambda z: 1/(1.01 + z**3)])
def test_polynomials_and_reciprocals(self, func):
assert_allclose(AAA(UNIT_INTERVAL, func(UNIT_INTERVAL))(PTS),
func(PTS), atol=2e-13)
# The following tests are taken from:
# https://github.com/macd/BaryRational.jl/blob/main/test/test_aaa.jl
def test_spiral(self):
z = np.exp(np.linspace(-0.5, 0.5 + 15j*np.pi, num=1000))
r = AAA(z, np.tan(np.pi*z/2))
assert_allclose(np.sort(np.abs(r.poles()))[:4], [1, 1, 3, 3], rtol=9e-7)
@pytest.mark.thread_unsafe
def test_spiral_cleanup(self):
z = np.exp(np.linspace(-0.5, 0.5 + 15j*np.pi, num=1000))
# here we set `rtol=0` to force Froissart doublets; without cleanup there
# are many spurious poles
with pytest.warns(RuntimeWarning):
r = AAA(z, np.tan(np.pi*z/2), rtol=0, max_terms=60, clean_up=False)
n_spurious = np.sum(np.abs(r.residues()) < 1e-14)
with pytest.warns(RuntimeWarning):
assert r.clean_up() >= 1
# check there are fewer potentially spurious poles than before
assert np.sum(np.abs(r.residues()) < 1e-14) < n_spurious
# check accuracy
assert_allclose(r(z), np.tan(np.pi*z/2), atol=6e-12, rtol=3e-12)
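# A hedged, illustrative sketch (underscore-prefixed so pytest does not
# collect it): it recaps the AAA workflow exercised above -- build the
# rational approximant, evaluate it, and extract its poles. It assumes only
# the public `scipy.interpolate.AAA` API used in these tests.
def _aaa_usage_sketch():
    zz = np.linspace(-0.9, 0.9, 501)
    r = AAA(zz, np.tan(np.pi * zz / 2))        # build the approximant
    assert np.allclose(r(zz), np.tan(np.pi * zz / 2), atol=1e-9)
    # tan(pi*z/2) has its nearest poles at z = +-1; AAA should locate them
    assert np.allclose(np.sort(np.abs(r.poles()))[:2], [1, 1], rtol=1e-3)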
class TestFloaterHormann:
def runge(self, z):
return 1/(1 + z**2)
def scale(self, n, d):
return (-1)**(np.arange(n) + d) * factorial(d)
def test_iv(self):
with pytest.raises(ValueError, match="`x`"):
FloaterHormannInterpolator([[0]], [0], d=0)
with pytest.raises(ValueError, match="`y`"):
FloaterHormannInterpolator([0], 0, d=0)
with pytest.raises(ValueError, match="dimension"):
FloaterHormannInterpolator([0], [[1, 1], [1, 1]], d=0)
with pytest.raises(ValueError, match="finite"):
FloaterHormannInterpolator([np.inf], [1], d=0)
with pytest.raises(ValueError, match="`d`"):
FloaterHormannInterpolator([0], [0], d=-1)
with pytest.raises(ValueError, match="`d`"):
FloaterHormannInterpolator([0], [0], d=10)
with pytest.raises(TypeError):
FloaterHormannInterpolator([0], [0], d=0.0)
# reference values from Floater and Hormann 2007, page 8; see also the
# weight-formula sketch at the end of this file.
@pytest.mark.parametrize("d,expected", [
(0, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
(1, [1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]),
(2, [1, 3, 4, 4, 4, 4, 4, 4, 4, 3, 1]),
(3, [1, 4, 7, 8, 8, 8, 8, 8, 7, 4, 1]),
(4, [1, 5, 11, 15, 16, 16, 16, 15, 11, 5, 1])
])
def test_uniform_grid(self, d, expected):
# Check against explicit results on a uniform grid
x = np.arange(11)
r = FloaterHormannInterpolator(x, 0.0*x, d=d)
assert_allclose(r.weights.ravel()*self.scale(x.size, d), expected,
rtol=1e-15, atol=1e-15)
@pytest.mark.parametrize("d", range(10))
def test_runge(self, d):
x = np.linspace(0, 1, 51)
rng = np.random.default_rng(802754237598370893)
xx = rng.uniform(0, 1, size=1000)
y = self.runge(x)
h = x[1] - x[0]
r = FloaterHormannInterpolator(x, y, d=d)
tol = 10*h**(d+1)
assert_allclose(r(xx), self.runge(xx), atol=1e-10, rtol=tol)
# check interpolation property
assert_equal(r(x), self.runge(x))
def test_complex(self):
x = np.linspace(-1, 1)
z = x + x*1j
r = FloaterHormannInterpolator(z, np.sin(z), d=12)
xx = np.linspace(-1, 1, num=1000)
zz = xx + xx*1j
assert_allclose(r(zz), np.sin(zz), rtol=1e-12)
def test_polyinterp(self):
# check that when d=n-1 FH gives a polynomial interpolant
x = np.linspace(0, 1, 11)
xx = np.linspace(0, 1, 1001)
y = np.sin(x)
r = FloaterHormannInterpolator(x, y, d=x.size-1)
p = BarycentricInterpolator(x, y)
assert_allclose(r(xx), p(xx), rtol=1e-12, atol=1e-12)
@pytest.mark.parametrize("y_shape", [(2,), (2, 3, 1), (1, 5, 6, 4)])
@pytest.mark.parametrize("xx_shape", [(100), (10, 10)])
def test_trailing_dim(self, y_shape, xx_shape):
x = np.linspace(0, 1)
y = np.broadcast_to(
np.expand_dims(np.sin(x), tuple(range(1, len(y_shape) + 1))),
x.shape + y_shape
)
r = FloaterHormannInterpolator(x, y)
rng = np.random.default_rng(897138947238097528091759187597)
xx = rng.random(xx_shape)
yy = np.broadcast_to(
np.expand_dims(np.sin(xx), tuple(range(xx.ndim, len(y_shape) + xx.ndim))),
xx.shape + y_shape
)
rr = r(xx)
assert rr.shape == xx.shape + y_shape
assert_allclose(rr, yy, rtol=1e-6)
def test_zeros(self):
x = np.linspace(0, 10, num=100)
r = FloaterHormannInterpolator(x, np.sin(np.pi*x))
err = np.abs(np.subtract.outer(r.roots(), np.arange(11))).min(axis=0)
assert_array_less(err, 1e-5)
def test_no_poles(self):
x = np.linspace(-1, 1)
r = FloaterHormannInterpolator(x, 1/x**2)
p = r.poles()
mask = (p.real >= -1) & (p.real <= 1) & (np.abs(p.imag) < 1.e-12)
assert np.sum(mask) == 0
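# A hedged sketch of the Floater--Hormann (2007) uniform-grid weight formula
# behind the reference table in test_uniform_grid: up to sign and a 1/d!
# factor, the weights are sums of binomial coefficients. Underscore-prefixed
# so pytest does not collect it; it assumes the module-level imports above.
def _fh_uniform_weights_sketch():
    from math import comb, factorial
    n, d = 11, 4
    x = np.arange(n, dtype=float)
    r = FloaterHormannInterpolator(x, np.zeros(n), d=d)
    # |w_i| * d! = sum_{k=max(0, i-d)}^{min(i, n-1-d)} C(d, i-k)
    expected = [sum(comb(d, i - k)
                    for k in range(max(0, i - d), min(i, n - 1 - d) + 1))
                for i in range(n)]
    assert np.allclose(np.abs(r.weights.ravel()) * factorial(d), expected)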


@ -0,0 +1,519 @@
import itertools
import os
import numpy as np
from scipy._lib._array_api import (
xp_assert_equal, xp_assert_close, assert_almost_equal, assert_array_almost_equal
)
from pytest import raises as assert_raises
import pytest
from scipy._lib._testutils import check_free_memory
from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import make_splrep
from scipy.interpolate._fitpack_py import (splrep, splev, bisplrep, bisplev,
sproot, splprep, splint, spalde, splder, splantider, insert, dblint)
from scipy.interpolate._dfitpack import regrid_smth
from scipy.interpolate._fitpack2 import dfitpack_int
def data_file(basename):
return os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', basename)
def norm2(x):
return np.sqrt(np.dot(x.T, x))
def f1(x, d=0):
"""Derivatives of sin->cos->-sin->-cos."""
if d % 4 == 0:
return np.sin(x)
if d % 4 == 1:
return np.cos(x)
if d % 4 == 2:
return -np.sin(x)
if d % 4 == 3:
return -np.cos(x)
def makepairs(x, y):
"""Helper function to create an array of pairs of x and y."""
xy = np.array(list(itertools.product(np.asarray(x), np.asarray(y))))
return xy.T
class TestSmokeTests:
"""
Smoke tests (with a few asserts) for fitpack routines -- mostly
check that they are runnable
"""
def check_1(self, per=0, s=0, a=0, b=2*np.pi, at_nodes=False,
xb=None, xe=None):
if xb is None:
xb = a
if xe is None:
xe = b
N = 20
# nodes, plus a second set of interior points for evaluation
x = np.linspace(a, b, N + 1)
x1 = a + (b - a) * np.arange(1, N, dtype=float) / float(N - 1)
v = f1(x)
def err_est(k, d):
# Assume f has all derivatives < 1
h = 1.0 / N
tol = 5 * h**(.75*(k-d))
if s > 0:
tol += 1e5*s
return tol
for k in range(1, 6):
tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
tt = tck[0][k:-k] if at_nodes else x1
for d in range(k+1):
tol = err_est(k, d)
err = norm2(f1(tt, d) - splev(tt, tck, d)) / norm2(f1(tt, d))
assert err < tol
# smoke test make_splrep
if not per:
spl = make_splrep(x, v, k=k, s=s, xb=xb, xe=xe)
if len(spl.t) == len(tck[0]):
xp_assert_close(spl.t, tck[0], atol=1e-15)
xp_assert_close(spl.c, tck[1][:spl.c.size], atol=1e-13)
else:
assert k == 5  # knot lengths differ in some k=5 cases
def check_2(self, per=0, N=20, ia=0, ib=2*np.pi):
a, b, dx = 0, 2*np.pi, 0.2*np.pi
x = np.linspace(a, b, N+1) # nodes
v = np.sin(x)
def err_est(k, d):
# Assume f has all derivatives < 1
h = 1.0 / N
tol = 5 * h**(.75*(k-d))
return tol
nk = []
for k in range(1, 6):
tck = splrep(x, v, s=0, per=per, k=k, xe=b)
nk.append([splint(ia, ib, tck), spalde(dx, tck)])
k = 1
for r in nk:
d = 0
for dr in r[1]:
tol = err_est(k, d)
xp_assert_close(dr, f1(dx, d), atol=0, rtol=tol)
d = d+1
k = k+1
def test_smoke_splrep_splev(self):
self.check_1(s=1e-6)
self.check_1(b=1.5*np.pi)
self.check_1(b=1.5*np.pi, xe=2*np.pi, per=1, s=1e-1)
@pytest.mark.parametrize('per', [0, 1])
@pytest.mark.parametrize('at_nodes', [True, False])
def test_smoke_splrep_splev_2(self, per, at_nodes):
self.check_1(per=per, at_nodes=at_nodes)
@pytest.mark.parametrize('N', [20, 50])
@pytest.mark.parametrize('per', [0, 1])
def test_smoke_splint_spalde(self, N, per):
self.check_2(per=per, N=N)
@pytest.mark.parametrize('N', [20, 50])
@pytest.mark.parametrize('per', [0, 1])
def test_smoke_splint_spalde_iaib(self, N, per):
self.check_2(ia=0.2*np.pi, ib=np.pi, N=N, per=per)
def test_smoke_sproot(self):
# sproot is only implemented for k=3
a, b = 0.1, 15
x = np.linspace(a, b, 20)
v = np.sin(x)
for k in [1, 2, 4, 5]:
tck = splrep(x, v, s=0, per=0, k=k, xe=b)
with assert_raises(ValueError):
sproot(tck)
k = 3
tck = splrep(x, v, s=0, k=3)
roots = sproot(tck)
xp_assert_close(splev(roots, tck), np.zeros(len(roots)), atol=1e-10, rtol=1e-10)
xp_assert_close(roots, np.pi * np.array([1, 2, 3, 4]), rtol=1e-3)
@pytest.mark.parametrize('N', [20, 50])
@pytest.mark.parametrize('k', [1, 2, 3, 4, 5])
def test_smoke_splprep_splrep_splev(self, N, k):
a, b, dx = 0, 2.*np.pi, 0.2*np.pi
x = np.linspace(a, b, N+1) # nodes
v = np.sin(x)
tckp, u = splprep([x, v], s=0, per=0, k=k, nest=-1)
uv = splev(dx, tckp)
err1 = abs(uv[1] - np.sin(uv[0]))
assert err1 < 1e-2
tck = splrep(x, v, s=0, per=0, k=k)
err2 = abs(splev(uv[0], tck) - np.sin(uv[0]))
assert err2 < 1e-2
# Derivatives of parametric cubic spline at u (first function)
if k == 3:
tckp, u = splprep([x, v], s=0, per=0, k=k, nest=-1)
for d in range(1, k+1):
uv = splev(dx, tckp, d)
def test_smoke_bisplrep_bisplev(self):
xb, xe = 0, 2.*np.pi
yb, ye = 0, 2.*np.pi
kx, ky = 3, 3
Nx, Ny = 20, 20
def f2(x, y):
return np.sin(x+y)
x = np.linspace(xb, xe, Nx + 1)
y = np.linspace(yb, ye, Ny + 1)
xy = makepairs(x, y)
tck = bisplrep(xy[0], xy[1], f2(xy[0], xy[1]), s=0, kx=kx, ky=ky)
tt = [tck[0][kx:-kx], tck[1][ky:-ky]]
t2 = makepairs(tt[0], tt[1])
v1 = bisplev(tt[0], tt[1], tck)
v2 = f2(t2[0], t2[1])
v2.shape = len(tt[0]), len(tt[1])
assert norm2(np.ravel(v1 - v2)) < 1e-2
class TestSplev:
def test_1d_shape(self):
x = [1,2,3,4,5]
y = [4,5,6,7,8]
tck = splrep(x, y)
z = splev([1], tck)
assert z.shape == (1,)
z = splev(1, tck)
assert z.shape == ()
def test_2d_shape(self):
x = [1, 2, 3, 4, 5]
y = [4, 5, 6, 7, 8]
tck = splrep(x, y)
t = np.array([[1.0, 1.5, 2.0, 2.5],
[3.0, 3.5, 4.0, 4.5]])
z = splev(t, tck)
z0 = splev(t[0], tck)
z1 = splev(t[1], tck)
xp_assert_equal(z, np.vstack((z0, z1)))
def test_extrapolation_modes(self):
# test extrapolation modes
# * if ext=0, return the extrapolated value.
# * if ext=1, return 0
# * if ext=2, raise a ValueError
# * if ext=3, return the boundary value.
x = [1,2,3]
y = [0,2,4]
tck = splrep(x, y, k=1)
rstl = [[-2, 6], [0, 0], None, [0, 4]]
for ext in (0, 1, 3):
assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext])
assert_raises(ValueError, splev, [0, 4], tck, ext=2)
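# A hedged recap of the ``ext`` modes listed in test_extrapolation_modes
# above, on the same linear spline (y = 2x - 2 through the data).
# Underscore-prefixed so pytest does not collect it.
def _splev_ext_modes_sketch():
    tck = splrep([1, 2, 3], [0, 2, 4], k=1)
    assert_array_almost_equal(splev([0, 4], tck, ext=0), [-2, 6])  # extrapolate
    assert_array_almost_equal(splev([0, 4], tck, ext=1), [0, 0])   # return 0
    assert_array_almost_equal(splev([0, 4], tck, ext=3), [0, 4])   # boundary value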
class TestSplder:
def setup_method(self):
# non-uniform grid, just to make sure
x = np.linspace(0, 1, 100)**3
y = np.sin(20 * x)
self.spl = splrep(x, y)
# double check that knots are non-uniform
assert np.ptp(np.diff(self.spl[0])) > 0
def test_inverse(self):
# Check that antiderivative + derivative is identity.
for n in range(5):
spl2 = splantider(self.spl, n)
spl3 = splder(spl2, n)
xp_assert_close(self.spl[0], spl3[0])
xp_assert_close(self.spl[1], spl3[1])
assert self.spl[2] == spl3[2]
def test_splder_vs_splev(self):
# Check derivative vs. FITPACK
for n in range(3+1):
# Also extrapolation!
xx = np.linspace(-1, 2, 2000)
if n == 3:
# ... except for n == 3, where the derivative is piecewise constant
# (order 0) and FITPACK extrapolates strangely, so skip extrapolation.
xx = xx[(xx >= 0) & (xx <= 1)]
dy = splev(xx, self.spl, n)
spl2 = splder(self.spl, n)
dy2 = splev(xx, spl2)
if n == 1:
xp_assert_close(dy, dy2, rtol=2e-6)
else:
xp_assert_close(dy, dy2)
def test_splantider_vs_splint(self):
# Check antiderivative vs. FITPACK
spl2 = splantider(self.spl)
# no extrapolation: splint assumes the function is zero outside
# the data range
xx = np.linspace(0, 1, 20)
for x1 in xx:
for x2 in xx:
y1 = splint(x1, x2, self.spl)
y2 = splev(x2, spl2) - splev(x1, spl2)
xp_assert_close(np.asarray(y1), np.asarray(y2))
def test_order0_diff(self):
assert_raises(ValueError, splder, self.spl, 4)
def test_kink(self):
# Should refuse to differentiate splines with kinks
spl2 = insert(0.5, self.spl, m=2)
splder(spl2, 2) # Should work
assert_raises(ValueError, splder, spl2, 3)
spl2 = insert(0.5, self.spl, m=3)
splder(spl2, 1) # Should work
assert_raises(ValueError, splder, spl2, 2)
spl2 = insert(0.5, self.spl, m=4)
assert_raises(ValueError, splder, spl2, 1)
def test_multidim(self):
# c can have trailing dims
for n in range(3):
t, c, k = self.spl
c2 = np.c_[c, c, c]
c2 = np.dstack((c2, c2))
spl2 = splantider((t, c2, k), n)
spl3 = splder(spl2, n)
xp_assert_close(t, spl3[0])
xp_assert_close(c2, spl3[1])
assert k == spl3[2]
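# A hedged illustration of the inverse pair verified in TestSplder.test_inverse
# above: differentiating the antiderivative recovers the original tck.
# Underscore-prefixed so pytest does not collect it.
def _splder_splantider_sketch():
    x = np.linspace(0, 1, 20)
    t, c, k = splrep(x, np.sin(2 * np.pi * x))
    t2, c2, k2 = splder(splantider((t, c, k)))
    xp_assert_close(t, t2)
    xp_assert_close(c, c2)
    assert k == k2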
class TestSplint:
def test_len_c(self):
n, k = 7, 3
x = np.arange(n)
y = x**3
t, c, k = splrep(x, y, s=0)
# note that len(c) == len(t) == 11 (== len(x) + 2*(k-1))
assert len(t) == len(c) == n + 2*(k-1)
# integrate directly: $\int_0^6 x^3 dx = 6^4 / 4$
res = splint(0, 6, (t, c, k))
expected = 6**4 / 4
assert abs(res - expected) < 1e-13
# check that the coefficients past len(t) - k - 1 are ignored
c0 = c.copy()
c0[len(t) - k - 1:] = np.nan
res0 = splint(0, 6, (t, c0, k))
assert abs(res0 - expected) < 1e-13
# however, all other coefficients *are* used
c0[6] = np.nan
assert np.isnan(splint(0, 6, (t, c0, k)))
# check that the coefficient array can have length `len(t) - k - 1`
c1 = c[:len(t) - k - 1]
res1 = splint(0, 6, (t, c1, k))
assert abs(res1 - expected) < 1e-13
# however, shorter c arrays raise. The error from f2py is a
# `dfitpack.error`, which is an Exception but not a ValueError etc.
with assert_raises(Exception, match=r">=n-k-1"):
splint(0, 1, (np.ones(10), np.ones(5), 3))
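# A hedged recap of the FITPACK length convention exercised in
# TestSplint.test_len_c above: an interpolating spline through n points has
# len(t) == len(c) == n + 2*(k-1), and only the first len(t) - k - 1
# coefficients enter the evaluation. Underscore-prefixed so pytest does not
# collect it.
def _tck_length_sketch():
    n = 7
    x = np.arange(n, dtype=float)
    t, c, k = splrep(x, x**3, s=0)
    assert len(t) == len(c) == n + 2 * (k - 1)
    # a coefficient array truncated to len(t) - k - 1 entries is equivalent
    assert abs(splint(0, 6, (t, c[:len(t) - k - 1], k)) - 6**4 / 4) < 1e-13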
class TestBisplrep:
def test_overflow(self):
from numpy.lib.stride_tricks import as_strided
if dfitpack_int.itemsize == 8:
size = 1500000**2
else:
size = 400**2
# Don't allocate a real array (it would be very big); rely on the
# fact that it is never actually referenced
x = as_strided(np.zeros(()), shape=(size,))
assert_raises(OverflowError, bisplrep, x, x, x, w=x,
xb=0, xe=1, yb=0, ye=1, s=0)
def test_regression_1310(self):
# Regression test for gh-1310
with np.load(data_file('bug-1310.npz')) as loaded_data:
data = loaded_data['data']
# Shouldn't crash -- the input data triggers work array sizes
# that previously caused some data not to be aligned on
# sizeof(double) boundaries in memory, which made the Fortran
# code crash when compiled with -O3
bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0,
full_output=True)
@pytest.mark.skipif(dfitpack_int != np.int64, reason="needs ilp64 fitpack")
def test_ilp64_bisplrep(self):
check_free_memory(28000) # VM size, doesn't actually use the pages
x = np.linspace(0, 1, 400)
y = np.linspace(0, 1, 400)
x, y = np.meshgrid(x, y)
z = np.zeros_like(x)
tck = bisplrep(x, y, z, kx=3, ky=3, s=0)
xp_assert_close(bisplev(0.5, 0.5, tck), 0.0)
def test_dblint():
# Basic test to see it runs and gives the correct result on a trivial
# problem. Note that `dblint` is not exposed in the interpolate namespace.
x = np.linspace(0, 1)
y = np.linspace(0, 1)
xx, yy = np.meshgrid(x, y)
rect = RectBivariateSpline(x, y, 4 * xx * yy)
tck = list(rect.tck)
tck.extend(rect.degrees)
assert abs(dblint(0, 1, 0, 1, tck) - 1) < 1e-10
assert abs(dblint(0, 0.5, 0, 1, tck) - 0.25) < 1e-10
assert abs(dblint(0.5, 1, 0, 1, tck) - 0.75) < 1e-10
assert abs(dblint(-100, 100, -100, 100, tck) - 1) < 1e-10
def test_splev_der_k():
# regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
# for x outside of knot range
# test case from gh-2188
tck = (np.array([0., 0., 2.5, 2.5]),
np.array([-1.56679978, 2.43995873, 0., 0.]),
1)
t, c, k = tck
x = np.array([-3, 0, 2.5, 3])
# an explicit form of the linear spline
xp_assert_close(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2])
xp_assert_close(splev(x, tck, 1),
np.ones_like(x) * (c[1] - c[0]) / t[2]
)
# now check a random spline vs splder
np.random.seed(1234)
x = np.sort(np.random.random(30))
y = np.random.random(30)
t, c, k = splrep(x, y)
x = [t[0] - 1., t[-1] + 1.]
tck2 = splder((t, c, k), k)
xp_assert_close(splev(x, (t, c, k), k), splev(x, tck2))
def test_splprep_segfault():
# regression test for gh-3847: splprep segfaults if knots are specified
# for task=-1
t = np.arange(0, 1.1, 0.1)
x = np.sin(2*np.pi*t)
y = np.cos(2*np.pi*t)
tck, u = splprep([x, y], s=0)
uknots = tck[0] # using the knots from the previous fitting
tck, u = splprep([x, y], task=-1, t=uknots) # here is the crash
def test_bisplev_integer_overflow():
np.random.seed(1)
x = np.linspace(0, 1, 11)
y = x
z = np.random.randn(11, 11).ravel()
kx = 1
ky = 1
nx, tx, ny, ty, c, fp, ier = regrid_smth(
x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0)
tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky)
xp = np.zeros([2621440])
yp = np.zeros([2621440])
assert_raises((RuntimeError, MemoryError), bisplev, xp, yp, tck)
@pytest.mark.xslow
def test_gh_1766():
# this should fail gracefully instead of segfaulting (int overflow)
size = 22
kx, ky = 3, 3
def f2(x, y):
return np.sin(x+y)
x = np.linspace(0, 10, size)
y = np.linspace(50, 700, size)
xy = makepairs(x, y)
tck = bisplrep(xy[0], xy[1], f2(xy[0], xy[1]), s=0, kx=kx, ky=ky)
# the size value here can either segfault
# or produce a MemoryError on main
tx_ty_size = 500000
tck[0] = np.arange(tx_ty_size)
tck[1] = np.arange(tx_ty_size) * 4
tt_0 = np.arange(50)
tt_1 = np.arange(50) * 3
with pytest.raises(MemoryError):
bisplev(tt_0, tt_1, tck, 1, 1)
def test_spalde_scalar_input():
# Ticket #629
x = np.linspace(0, 10)
y = x**3
tck = splrep(x, y, k=3, t=[5])
res = spalde(np.float64(1), tck)
des = np.array([1., 3., 6., 6.])
assert_almost_equal(res, des)
def test_spalde_nc():
# regression test for https://github.com/scipy/scipy/issues/19002
# here len(t) = 29 and len(c) = 25 (== len(t) - k - 1)
x = np.asarray([-10., -9., -8., -7., -6., -5., -4., -3., -2.5, -2., -1.5,
-1., -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 4., 5., 6.],
dtype="float")
t = [-10.0, -10.0, -10.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0,
-2.5, -2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0,
5.0, 6.0, 6.0, 6.0, 6.0]
c = np.asarray([1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
k = 3
res = spalde(x, (t, c, k))
res = np.vstack(res)
res_splev = np.asarray([splev(x, (t, c, k), nu) for nu in range(4)])
xp_assert_close(res, res_splev.T, atol=1e-15)


@ -0,0 +1,64 @@
import itertools
import threading
import time
import numpy as np
import pytest
import scipy.interpolate
class TestGIL:
"""Check if the GIL is properly released by scipy.interpolate functions."""
def setup_method(self):
self.messages = []
def log(self, message):
self.messages.append(message)
def make_worker_thread(self, target, args):
log = self.log
class WorkerThread(threading.Thread):
def run(self):
log('interpolation started')
target(*args)
log('interpolation complete')
return WorkerThread()
@pytest.mark.xslow
@pytest.mark.xfail(reason='race conditions, may depend on system load')
def test_rectbivariatespline(self):
def generate_params(n_points):
x = y = np.linspace(0, 1000, n_points)
x_grid, y_grid = np.meshgrid(x, y)
z = x_grid * y_grid
return x, y, z
def calibrate_delay(requested_time):
for n_points in itertools.count(5000, 1000):
args = generate_params(n_points)
time_started = time.time()
interpolate(*args)
if time.time() - time_started > requested_time:
return args
def interpolate(x, y, z):
scipy.interpolate.RectBivariateSpline(x, y, z)
args = calibrate_delay(requested_time=3)
worker_thread = self.make_worker_thread(interpolate, args)
worker_thread.start()
for i in range(3):
time.sleep(0.5)
self.log('working')
worker_thread.join()
assert self.messages == [
'interpolation started',
'working',
'working',
'working',
'interpolation complete',
]


@ -0,0 +1,452 @@
import os
import sys
import numpy as np
from numpy.testing import suppress_warnings
from pytest import raises as assert_raises
import pytest
from scipy._lib._array_api import xp_assert_close, assert_almost_equal
from scipy._lib._testutils import check_free_memory
import scipy.interpolate._interpnd as interpnd
import scipy.spatial._qhull as qhull
import pickle
import threading
_IS_32BIT = (sys.maxsize < 2**32)
def data_file(basename):
return os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', basename)
class TestLinearNDInterpolation:
def test_smoketest(self):
# Test at single points
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
yi = interpnd.LinearNDInterpolator(x, y)(x)
assert_almost_equal(y, yi)
def test_smoketest_alternate(self):
# Test at single points, alternate calling convention
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
yi = interpnd.LinearNDInterpolator((x[:,0], x[:,1]), y)(x[:,0], x[:,1])
assert_almost_equal(y, yi)
def test_complex_smoketest(self):
# Test at single points
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
y = y - 3j*y
yi = interpnd.LinearNDInterpolator(x, y)(x)
assert_almost_equal(y, yi)
def test_tri_input(self):
# Test at single points
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
y = y - 3j*y
tri = qhull.Delaunay(x)
interpolator = interpnd.LinearNDInterpolator(tri, y)
yi = interpolator(x)
assert_almost_equal(y, yi)
assert interpolator.tri is tri
def test_square(self):
# Test barycentric interpolation on a square against a manual
# implementation (see also the formula sketch at the end of this file)
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.float64)
values = np.array([1., 2., -3., 5.], dtype=np.float64)
# NB: assume triangles (0, 1, 3) and (1, 2, 3)
#
# 1----2
# | \ |
# | \ |
# 0----3
def ip(x, y):
t1 = (x + y <= 1)
t2 = ~t1
x1 = x[t1]
y1 = y[t1]
x2 = x[t2]
y2 = y[t2]
z = 0*x
z[t1] = (values[0]*(1 - x1 - y1)
+ values[1]*y1
+ values[3]*x1)
z[t2] = (values[2]*(x2 + y2 - 1)
+ values[1]*(1 - x2)
+ values[3]*(1 - y2))
return z
xx, yy = np.broadcast_arrays(np.linspace(0, 1, 14)[:,None],
np.linspace(0, 1, 14)[None,:])
xx = xx.ravel()
yy = yy.ravel()
xi = np.array([xx, yy]).T.copy()
zi = interpnd.LinearNDInterpolator(points, values)(xi)
assert_almost_equal(zi, ip(xx, yy))
def test_smoketest_rescale(self):
# Test at single points
x = np.array([(0, 0), (-5, -5), (-5, 5), (5, 5), (2.5, 3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
yi = interpnd.LinearNDInterpolator(x, y, rescale=True)(x)
assert_almost_equal(y, yi)
def test_square_rescale(self):
# Test barycentric interpolation on a rectangle with rescaling
# against the same implementation without rescaling
points = np.array([(0,0), (0,100), (10,100), (10,0)], dtype=np.float64)
values = np.array([1., 2., -3., 5.], dtype=np.float64)
xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
np.linspace(0, 100, 14)[None,:])
xx = xx.ravel()
yy = yy.ravel()
xi = np.array([xx, yy]).T.copy()
zi = interpnd.LinearNDInterpolator(points, values)(xi)
zi_rescaled = interpnd.LinearNDInterpolator(points, values,
rescale=True)(xi)
assert_almost_equal(zi, zi_rescaled)
def test_tripoints_input_rescale(self):
# Test at single points
x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
y = y - 3j*y
tri = qhull.Delaunay(x)
yi = interpnd.LinearNDInterpolator(tri.points, y)(x)
yi_rescale = interpnd.LinearNDInterpolator(tri.points, y,
rescale=True)(x)
assert_almost_equal(yi, yi_rescale)
def test_tri_input_rescale(self):
# Test at single points
x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
y = y - 3j*y
tri = qhull.Delaunay(x)
match = ("Rescaling is not supported when passing a "
"Delaunay triangulation as ``points``.")
with pytest.raises(ValueError, match=match):
interpnd.LinearNDInterpolator(tri, y, rescale=True)(x)
def test_pickle(self):
# Test at single points
np.random.seed(1234)
x = np.random.rand(30, 2)
y = np.random.rand(30) + 1j*np.random.rand(30)
ip = interpnd.LinearNDInterpolator(x, y)
ip2 = pickle.loads(pickle.dumps(ip))
assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
@pytest.mark.slow
@pytest.mark.thread_unsafe
@pytest.mark.skipif(_IS_32BIT, reason='it fails on 32-bit')
def test_threading(self):
# This test was taken from issue 8856
# https://github.com/scipy/scipy/issues/8856
check_free_memory(10000)
r_ticks = np.arange(0, 4200, 10)
phi_ticks = np.arange(0, 4200, 10)
r_grid, phi_grid = np.meshgrid(r_ticks, phi_ticks)
def do_interp(interpolator, slice_rows, slice_cols):
grid_x, grid_y = np.mgrid[slice_rows, slice_cols]
res = interpolator((grid_x, grid_y))
return res
points = np.vstack((r_grid.ravel(), phi_grid.ravel())).T
values = (r_grid * phi_grid).ravel()
interpolator = interpnd.LinearNDInterpolator(points, values)
worker_thread_1 = threading.Thread(
target=do_interp,
args=(interpolator, slice(0, 2100), slice(0, 2100)))
worker_thread_2 = threading.Thread(
target=do_interp,
args=(interpolator, slice(2100, 4200), slice(0, 2100)))
worker_thread_3 = threading.Thread(
target=do_interp,
args=(interpolator, slice(0, 2100), slice(2100, 4200)))
worker_thread_4 = threading.Thread(
target=do_interp,
args=(interpolator, slice(2100, 4200), slice(2100, 4200)))
worker_thread_1.start()
worker_thread_2.start()
worker_thread_3.start()
worker_thread_4.start()
worker_thread_1.join()
worker_thread_2.join()
worker_thread_3.join()
worker_thread_4.join()
class TestEstimateGradients2DGlobal:
def test_smoketest(self):
x = np.array([(0, 0), (0, 2),
(1, 0), (1, 2), (0.25, 0.75), (0.6, 0.8)], dtype=float)
tri = qhull.Delaunay(x)
# Should be exact for linear functions, independent of triangulation
funcs = [
(lambda x, y: 0*x + 1, (0, 0)),
(lambda x, y: 0 + x, (1, 0)),
(lambda x, y: -2 + y, (0, 1)),
(lambda x, y: 3 + 3*x + 14.15*y, (3, 14.15))
]
for j, (func, grad) in enumerate(funcs):
z = func(x[:,0], x[:,1])
dz = interpnd.estimate_gradients_2d_global(tri, z, tol=1e-6)
assert dz.shape == (6, 2)
xp_assert_close(
dz, np.array(grad)[None, :] + 0*dz, rtol=1e-5, atol=1e-5,
err_msg=f"item {j}"
)
def test_regression_2359(self):
# Check regression --- for certain point sets, gradient
# estimation could end up in an infinite loop
points = np.load(data_file('estimate_gradients_hang.npy'))
values = np.random.rand(points.shape[0])
tri = qhull.Delaunay(points)
# This should not hang
with suppress_warnings() as sup:
sup.filter(interpnd.GradientEstimationWarning,
"Gradient estimation did not converge")
interpnd.estimate_gradients_2d_global(tri, values, maxiter=1)
class TestCloughTocher2DInterpolator:
def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False,
rescale=False, **kw):
rng = np.random.RandomState(1234)
if x is None:
x = np.array([(0, 0), (0, 1),
(1, 0), (1, 1), (0.25, 0.75), (0.6, 0.8),
(0.5, 0.2)],
dtype=float)
if not alternate:
ip = interpnd.CloughTocher2DInterpolator(x, func(x[:,0], x[:,1]),
tol=1e-6, rescale=rescale)
else:
ip = interpnd.CloughTocher2DInterpolator((x[:,0], x[:,1]),
func(x[:,0], x[:,1]),
tol=1e-6, rescale=rescale)
p = rng.rand(50, 2)
if not alternate:
a = ip(p)
else:
a = ip(p[:,0], p[:,1])
b = func(p[:,0], p[:,1])
try:
xp_assert_close(a, b, **kw)
except AssertionError:
print("_check_accuracy: abs(a-b):", abs(a - b))
print("ip.grad:", ip.grad)
raise
def test_linear_smoketest(self):
# Should be exact for linear functions, independent of triangulation
funcs = [
lambda x, y: 0*x + 1,
lambda x, y: 0 + x,
lambda x, y: -2 + y,
lambda x, y: 3 + 3*x + 14.15*y,
]
for j, func in enumerate(funcs):
self._check_accuracy(
func, tol=1e-13, atol=1e-7, rtol=1e-7, err_msg=f"Function {j}"
)
self._check_accuracy(
func, tol=1e-13, atol=1e-7, rtol=1e-7, alternate=True,
err_msg=f"Function (alternate) {j}"
)
# check rescaling
self._check_accuracy(
func, tol=1e-13, atol=1e-7, rtol=1e-7,
err_msg=f"Function (rescaled) {j}", rescale=True
)
self._check_accuracy(
func, tol=1e-13, atol=1e-7, rtol=1e-7, alternate=True, rescale=True,
err_msg=f"Function (alternate, rescaled) {j}"
)
def test_quadratic_smoketest(self):
# Should be reasonably accurate for quadratic functions
funcs = [
lambda x, y: x**2,
lambda x, y: y**2,
lambda x, y: x**2 - y**2,
lambda x, y: x*y,
]
for j, func in enumerate(funcs):
self._check_accuracy(
func, tol=1e-9, atol=0.22, rtol=0, err_msg=f"Function {j}"
)
self._check_accuracy(
func, tol=1e-9, atol=0.22, rtol=0, err_msg=f"Function {j}", rescale=True
)
def test_tri_input(self):
# Test at single points
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
y = y - 3j*y
tri = qhull.Delaunay(x)
yi = interpnd.CloughTocher2DInterpolator(tri, y)(x)
assert_almost_equal(y, yi)
def test_tri_input_rescale(self):
# Test at single points
x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
y = y - 3j*y
tri = qhull.Delaunay(x)
match = ("Rescaling is not supported when passing a "
"Delaunay triangulation as ``points``.")
with pytest.raises(ValueError, match=match):
interpnd.CloughTocher2DInterpolator(tri, y, rescale=True)(x)
def test_tripoints_input_rescale(self):
# Test at single points
x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
y = y - 3j*y
tri = qhull.Delaunay(x)
yi = interpnd.CloughTocher2DInterpolator(tri.points, y)(x)
yi_rescale = interpnd.CloughTocher2DInterpolator(tri.points, y, rescale=True)(x)
assert_almost_equal(yi, yi_rescale)
@pytest.mark.fail_slow(5)
def test_dense(self):
# Should be more accurate for dense meshes
funcs = [
lambda x, y: x**2,
lambda x, y: y**2,
lambda x, y: x**2 - y**2,
lambda x, y: x*y,
lambda x, y: np.cos(2*np.pi*x)*np.sin(2*np.pi*y)
]
rng = np.random.RandomState(4321) # use a different seed than the check!
grid = np.r_[np.array([(0,0), (0,1), (1,0), (1,1)], dtype=float),
rng.rand(30*30, 2)]
for j, func in enumerate(funcs):
self._check_accuracy(
func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2, err_msg=f"Function {j}"
)
self._check_accuracy(
func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
err_msg=f"Function {j}", rescale=True
)
def test_wrong_ndim(self):
x = np.random.randn(30, 3)
y = np.random.randn(30)
assert_raises(ValueError, interpnd.CloughTocher2DInterpolator, x, y)
def test_pickle(self):
# Test at single points
rng = np.random.RandomState(1234)
x = rng.rand(30, 2)
y = rng.rand(30) + 1j*rng.rand(30)
ip = interpnd.CloughTocher2DInterpolator(x, y)
ip2 = pickle.loads(pickle.dumps(ip))
assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
def test_boundary_tri_symmetry(self):
# Interpolation at triangles without neighbours should be symmetric
# under mirroring of the triangle.
# Equilateral triangle
points = np.array([(0, 0), (1, 0), (0.5, np.sqrt(3)/2)])
values = np.array([1, 0, 0])
ip = interpnd.CloughTocher2DInterpolator(points, values)
# Set gradient to zero at vertices
ip.grad[...] = 0
# Interpolation should be symmetric vs. bisector
alpha = 0.3
p1 = np.array([0.5 * np.cos(alpha), 0.5 * np.sin(alpha)])
p2 = np.array([0.5 * np.cos(np.pi/3 - alpha), 0.5 * np.sin(np.pi/3 - alpha)])
v1 = ip(p1)
v2 = ip(p2)
xp_assert_close(v1, v2)
# ... and affine invariant
rng = np.random.RandomState(1)
A = rng.randn(2, 2)
b = rng.randn(2)
points = A.dot(points.T).T + b[None,:]
p1 = A.dot(p1) + b
p2 = A.dot(p2) + b
ip = interpnd.CloughTocher2DInterpolator(points, values)
ip.grad[...] = 0
w1 = ip(p1)
w2 = ip(p2)
xp_assert_close(w1, v1)
xp_assert_close(w2, v2)
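# A hedged, standalone recap of the manual barycentric formula used in
# TestLinearNDInterpolation.test_square: inside a triangle, linear
# interpolation is the barycentric-coordinate combination of the vertex
# values. Underscore-prefixed so pytest does not collect it.
def _barycentric_formula_sketch():
    pts = np.array([(0, 0), (1, 0), (0, 1)], dtype=np.float64)
    vals = np.array([1.0, 5.0, 2.0])
    ip = interpnd.LinearNDInterpolator(pts, vals)
    x, y = 0.2, 0.3
    manual = vals[0] * (1 - x - y) + vals[1] * x + vals[2] * y
    assert np.allclose(ip(x, y), manual)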


@ -0,0 +1,308 @@
import numpy as np
from scipy._lib._array_api import (
xp_assert_equal, xp_assert_close
)
import pytest
from pytest import raises as assert_raises
from scipy.interpolate import (griddata, NearestNDInterpolator,
LinearNDInterpolator,
CloughTocher2DInterpolator)
from scipy._lib._testutils import _run_concurrent_barrier
parametrize_interpolators = pytest.mark.parametrize(
"interpolator", [NearestNDInterpolator, LinearNDInterpolator,
CloughTocher2DInterpolator]
)
parametrize_methods = pytest.mark.parametrize(
'method',
('nearest', 'linear', 'cubic'),
)
parametrize_rescale = pytest.mark.parametrize(
'rescale',
(True, False),
)
class TestGriddata:
def test_fill_value(self):
x = [(0,0), (0,1), (1,0)]
y = [1, 2, 3]
yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1)
xp_assert_equal(yi, [-1., -1, 1])
yi = griddata(x, y, [(1,1), (1,2), (0,0)])
xp_assert_equal(yi, [np.nan, np.nan, 1])
@parametrize_methods
@parametrize_rescale
def test_alternative_call(self, method, rescale):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.float64)
y = (np.arange(x.shape[0], dtype=np.float64)[:,None]
+ np.array([0,1])[None,:])
msg = repr((method, rescale))
yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method,
rescale=rescale)
xp_assert_close(y, yi, atol=1e-14, err_msg=msg)
@parametrize_methods
@parametrize_rescale
def test_multivalue_2d(self, method, rescale):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.float64)
y = (np.arange(x.shape[0], dtype=np.float64)[:,None]
+ np.array([0,1])[None,:])
msg = repr((method, rescale))
yi = griddata(x, y, x, method=method, rescale=rescale)
xp_assert_close(y, yi, atol=1e-14, err_msg=msg)
@parametrize_methods
@parametrize_rescale
def test_multipoint_2d(self, method, rescale):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
msg = repr((method, rescale))
yi = griddata(x, y, xi, method=method, rescale=rescale)
assert yi.shape == (5, 3), msg
xp_assert_close(yi, np.tile(y[:,None], (1, 3)),
atol=1e-14, err_msg=msg)
@parametrize_methods
@parametrize_rescale
def test_complex_2d(self, method, rescale):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
y = y - 2j*y[::-1]
xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
msg = repr((method, rescale))
yi = griddata(x, y, xi, method=method, rescale=rescale)
assert yi.shape == (5, 3)
xp_assert_close(yi, np.tile(y[:,None], (1, 3)),
atol=1e-14, err_msg=msg)
@parametrize_methods
def test_1d(self, method):
x = np.array([1, 2.5, 3, 4.5, 5, 6])
y = np.array([1, 2, 0, 3.9, 2, 1])
xp_assert_close(griddata(x, y, x, method=method), y,
err_msg=method, atol=1e-14)
xp_assert_close(griddata(x.reshape(6, 1), y, x, method=method), y,
err_msg=method, atol=1e-14)
xp_assert_close(griddata((x,), y, (x,), method=method), y,
err_msg=method, atol=1e-14)
def test_1d_borders(self):
# Test for nearest neighbor case with xi outside
# the range of the values.
x = np.array([1, 2.5, 3, 4.5, 5, 6])
y = np.array([1, 2, 0, 3.9, 2, 1])
xi = np.array([0.9, 6.5])
yi_should = np.array([1.0, 1.0])
method = 'nearest'
xp_assert_close(griddata(x, y, xi,
method=method), yi_should,
err_msg=method,
atol=1e-14)
xp_assert_close(griddata(x.reshape(6, 1), y, xi,
method=method), yi_should,
err_msg=method,
atol=1e-14)
xp_assert_close(griddata((x, ), y, (xi, ),
method=method), yi_should,
err_msg=method,
atol=1e-14)
@parametrize_methods
def test_1d_unsorted(self, method):
x = np.array([2.5, 1, 4.5, 5, 6, 3])
y = np.array([1, 2, 0, 3.9, 2, 1])
xp_assert_close(griddata(x, y, x, method=method), y,
err_msg=method, atol=1e-10)
xp_assert_close(griddata(x.reshape(6, 1), y, x, method=method), y,
err_msg=method, atol=1e-10)
xp_assert_close(griddata((x,), y, (x,), method=method), y,
err_msg=method, atol=1e-10)
@parametrize_methods
def test_square_rescale_manual(self, method):
points = np.array([(0,0), (0,100), (10,100), (10,0), (1, 5)], dtype=np.float64)
points_rescaled = np.array([(0,0), (0,1), (1,1), (1,0), (0.1, 0.05)],
dtype=np.float64)
values = np.array([1., 2., -3., 5., 9.], dtype=np.float64)
xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
np.linspace(0, 100, 14)[None,:])
xx = xx.ravel()
yy = yy.ravel()
xi = np.array([xx, yy]).T.copy()
msg = method
zi = griddata(points_rescaled, values, xi/np.array([10, 100.]),
method=method)
zi_rescaled = griddata(points, values, xi, method=method,
rescale=True)
xp_assert_close(zi, zi_rescaled, err_msg=msg,
atol=1e-12)
@parametrize_methods
def test_xi_1d(self, method):
# Check that 1-D xi is interpreted as a coordinate
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.float64)
y = np.arange(x.shape[0], dtype=np.float64)
y = y - 2j*y[::-1]
xi = np.array([0.5, 0.5])
p1 = griddata(x, y, xi, method=method)
p2 = griddata(x, y, xi[None,:], method=method)
xp_assert_close(p1, p2, err_msg=method)
xi1 = np.array([0.5])
xi3 = np.array([0.5, 0.5, 0.5])
assert_raises(ValueError, griddata, x, y, xi1,
method=method)
assert_raises(ValueError, griddata, x, y, xi3,
method=method)
class TestNearestNDInterpolator:
def test_nearest_options(self):
# smoke test that NearestNDInterpolator accepts cKDTree options
npts, nd = 4, 3
x = np.arange(npts*nd).reshape((npts, nd))
y = np.arange(npts)
nndi = NearestNDInterpolator(x, y)
opts = {'balanced_tree': False, 'compact_nodes': False}
nndi_o = NearestNDInterpolator(x, y, tree_options=opts)
xp_assert_close(nndi(x), nndi_o(x), atol=1e-14)
def test_nearest_list_argument(self):
nd = np.array([[0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 2]])
d = nd[:, 3:]
# z is np.array
NI = NearestNDInterpolator((d[0], d[1]), d[2])
xp_assert_equal(NI([0.1, 0.9], [0.1, 0.9]), [0.0, 2.0])
# z is list
NI = NearestNDInterpolator((d[0], d[1]), list(d[2]))
xp_assert_equal(NI([0.1, 0.9], [0.1, 0.9]), [0.0, 2.0])
def test_nearest_query_options(self):
nd = np.array([[0, 0.5, 0, 1],
[0, 0, 0.5, 1],
[0, 1, 1, 2]])
delta = 0.1
query_points = [0 + delta, 1 + delta], [0 + delta, 1 + delta]
# case 1 - query max_dist is smaller than
# the query points' nearest distance to nd.
NI = NearestNDInterpolator((nd[0], nd[1]), nd[2])
distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) - 1e-7
xp_assert_equal(NI(query_points, distance_upper_bound=distance_upper_bound),
[np.nan, np.nan])
# case 2 - query p is inf, will return [0, 2]
distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) - 1e-7
p = np.inf
xp_assert_equal(
NI(query_points, distance_upper_bound=distance_upper_bound, p=p),
[0.0, 2.0]
)
# case 3 - query max_dist is larger, so should return non-NaN values
distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) + 1e-7
xp_assert_equal(
NI(query_points, distance_upper_bound=distance_upper_bound),
[0.0, 2.0]
)
def test_nearest_query_valid_inputs(self):
nd = np.array([[0, 1, 0, 1],
[0, 0, 1, 1],
[0, 1, 1, 2]])
NI = NearestNDInterpolator((nd[0], nd[1]), nd[2])
with assert_raises(TypeError):
NI([0.5, 0.5], query_options="not a dictionary")
@pytest.mark.thread_unsafe
def test_concurrency(self):
npts, nd = 50, 3
x = np.arange(npts * nd).reshape((npts, nd))
y = np.arange(npts)
nndi = NearestNDInterpolator(x, y)
def worker_fn(_, spl):
spl(x)
_run_concurrent_barrier(10, worker_fn, nndi)
class TestNDInterpolators:
@parametrize_interpolators
def test_broadcastable_input(self, interpolator):
# input data
rng = np.random.RandomState(0)
x = rng.random(10)
y = rng.random(10)
z = np.hypot(x, y)
# x-y grid for interpolation
X = np.linspace(min(x), max(x))
Y = np.linspace(min(y), max(y))
X, Y = np.meshgrid(X, Y)
XY = np.vstack((X.ravel(), Y.ravel())).T
interp = interpolator(list(zip(x, y)), z)
# single array input
interp_points0 = interp(XY)
# tuple input
interp_points1 = interp((X, Y))
interp_points2 = interp((X, 0.0))
# broadcastable input
interp_points3 = interp(X, Y)
interp_points4 = interp(X, 0.0)
assert (interp_points0.size ==
interp_points1.size ==
interp_points2.size ==
interp_points3.size ==
interp_points4.size)
@parametrize_interpolators
def test_read_only(self, interpolator):
# input data
rng = np.random.RandomState(0)
xy = rng.random((10, 2))
x, y = xy[:, 0], xy[:, 1]
z = np.hypot(x, y)
# interpolation points
XY = rng.random((50, 2))
xy.setflags(write=False)
z.setflags(write=False)
XY.setflags(write=False)
interp = interpolator(xy, z)
interp(XY)
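# A hedged summary of the three ``method`` options parametrized above: each
# reproduces the data at the sample points, while points outside the convex
# hull receive ``fill_value`` (NaN by default) for 'linear' and 'cubic'.
# Underscore-prefixed so pytest does not collect it.
def _griddata_methods_sketch():
    pts = np.array([(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)])
    vals = np.array([0.0, 1.0, 1.0, 2.0])
    for method in ('nearest', 'linear', 'cubic'):
        assert np.allclose(griddata(pts, vals, pts, method=method), vals)
    assert np.isnan(griddata(pts, vals, [(2.0, 2.0)], method='linear'))[0]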


@ -0,0 +1,107 @@
import numpy as np
from scipy.interpolate import pade
from scipy._lib._array_api import (
xp_assert_equal, assert_array_almost_equal
)
def test_pade_trivial():
nump, denomp = pade([1.0], 0)
xp_assert_equal(nump.c, np.asarray([1.0]))
xp_assert_equal(denomp.c, np.asarray([1.0]))
nump, denomp = pade([1.0], 0, 0)
xp_assert_equal(nump.c, np.asarray([1.0]))
xp_assert_equal(denomp.c, np.asarray([1.0]))
def test_pade_4term_exp():
# First four Taylor coefficients of exp(x).
# Unlike poly1d, the first array element is the zero-order term.
an = [1.0, 1.0, 0.5, 1.0/6]
nump, denomp = pade(an, 0)
assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
assert_array_almost_equal(denomp.c, [1.0])
nump, denomp = pade(an, 1)
assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])
nump, denomp = pade(an, 2)
assert_array_almost_equal(nump.c, [1.0/3, 1.0])
assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])
nump, denomp = pade(an, 3)
assert_array_almost_equal(nump.c, [1.0])
assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])
# Testing inclusion of optional parameter
nump, denomp = pade(an, 0, 3)
assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
assert_array_almost_equal(denomp.c, [1.0])
nump, denomp = pade(an, 1, 2)
assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])
nump, denomp = pade(an, 2, 1)
assert_array_almost_equal(nump.c, [1.0/3, 1.0])
assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])
nump, denomp = pade(an, 3, 0)
assert_array_almost_equal(nump.c, [1.0])
assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])
# Test with m + n + 1 smaller than len(an), which truncates the input array.
nump, denomp = pade(an, 0, 2)
assert_array_almost_equal(nump.c, [0.5, 1.0, 1.0])
assert_array_almost_equal(denomp.c, [1.0])
nump, denomp = pade(an, 1, 1)
assert_array_almost_equal(nump.c, [1.0/2, 1.0])
assert_array_almost_equal(denomp.c, [-1.0/2, 1.0])
nump, denomp = pade(an, 2, 0)
assert_array_almost_equal(nump.c, [1.0])
assert_array_almost_equal(denomp.c, [1.0/2, -1.0, 1.0])
def test_pade_ints():
# Simple test sequences (one of ints, one of floats).
an_int = [1, 2, 3, 4]
an_flt = [1.0, 2.0, 3.0, 4.0]
# Make sure integer arrays give the same result as float arrays with same values.
for i in range(0, len(an_int)):
for j in range(0, len(an_int) - i):
# Create float and int pade approximation for given order.
nump_int, denomp_int = pade(an_int, i, j)
nump_flt, denomp_flt = pade(an_flt, i, j)
# Check that they are the same.
xp_assert_equal(nump_int.c, nump_flt.c)
xp_assert_equal(denomp_int.c, denomp_flt.c)
def test_pade_complex():
# Test sequence with known solutions - see page 6 of 10.1109/PESGM.2012.6344759.
# Variable x is a parameter - these tests work with any complex number.
x = 0.2 + 0.6j
an = [1.0, x, -x*x.conjugate(), x.conjugate()*(x**2) + x*(x.conjugate()**2),
-(x**3)*x.conjugate() - 3*(x*x.conjugate())**2 - x*(x.conjugate()**3)]
nump, denomp = pade(an, 1, 1)
assert_array_almost_equal(nump.c, [x + x.conjugate(), 1.0])
assert_array_almost_equal(denomp.c, [x.conjugate(), 1.0])
nump, denomp = pade(an, 1, 2)
assert_array_almost_equal(nump.c, [x**2, 2*x + x.conjugate(), 1.0])
assert_array_almost_equal(denomp.c, [x + x.conjugate(), 1.0])
nump, denomp = pade(an, 2, 2)
assert_array_almost_equal(
nump.c,
[x**2 + x*x.conjugate() + x.conjugate()**2, 2*(x + x.conjugate()), 1.0]
)
assert_array_almost_equal(denomp.c, [x.conjugate()**2, x + 2*x.conjugate(), 1.0])
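# A hedged numerical check of the approximants constructed above: the [2/2]
# Pade approximant of exp, built from its first five Taylor coefficients,
# matches exp(x) to O(x**5) near the origin. Underscore-prefixed so pytest
# does not collect it.
def _pade_exp_sketch():
    an = [1.0, 1.0, 0.5, 1.0 / 6, 1.0 / 24]  # Taylor coefficients of exp
    p, q = pade(an, 2)                       # denominator order m = 2
    x = 0.1
    assert abs(p(x) / q(x) - np.exp(x)) < 1e-7  # error ~ x**5 / 720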


@ -0,0 +1,972 @@
import warnings
import io
import numpy as np
from scipy._lib._array_api import (
xp_assert_equal, xp_assert_close, assert_array_almost_equal, assert_almost_equal
)
from pytest import raises as assert_raises
import pytest
from scipy.interpolate import (
KroghInterpolator, krogh_interpolate,
BarycentricInterpolator, barycentric_interpolate,
approximate_taylor_polynomial, CubicHermiteSpline, pchip,
PchipInterpolator, pchip_interpolate, Akima1DInterpolator, CubicSpline,
make_interp_spline)
from scipy._lib._testutils import _run_concurrent_barrier
def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0,
extra_args=None):
if extra_args is None:
extra_args = {}
rng = np.random.RandomState(1234)
x = [-1, 0, 1, 2, 3, 4]
s = list(range(1, len(y_shape)+1))
s.insert(axis % (len(y_shape)+1), 0)
y = rng.rand(*((6,) + y_shape)).transpose(s)
xi = np.zeros(x_shape)
if interpolator_cls is CubicHermiteSpline:
dydx = rng.rand(*((6,) + y_shape)).transpose(s)
yi = interpolator_cls(x, y, dydx, axis=axis, **extra_args)(xi)
else:
yi = interpolator_cls(x, y, axis=axis, **extra_args)(xi)
target_shape = ((deriv_shape or ()) + y.shape[:axis]
+ x_shape + y.shape[axis:][1:])
assert yi.shape == target_shape
# check it works also with lists
if x_shape and y.size > 0:
if interpolator_cls is CubicHermiteSpline:
interpolator_cls(list(x), list(y), list(dydx), axis=axis,
**extra_args)(list(xi))
else:
interpolator_cls(list(x), list(y), axis=axis,
**extra_args)(list(xi))
# check also values
if xi.size > 0 and deriv_shape is None:
bs_shape = y.shape[:axis] + (1,)*len(x_shape) + y.shape[axis:][1:]
yv = y[((slice(None,),)*(axis % y.ndim)) + (1,)]
yv = yv.reshape(bs_shape)
yi, y = np.broadcast_arrays(yi, yv)
xp_assert_close(yi, y)
SHAPES = [(), (0,), (1,), (6, 2, 5)]
def test_shapes():
def spl_interp(x, y, axis):
return make_interp_spline(x, y, axis=axis)
for ip in [KroghInterpolator, BarycentricInterpolator, CubicHermiteSpline,
pchip, Akima1DInterpolator, CubicSpline, spl_interp]:
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
if ip != CubicSpline:
check_shape(ip, s1, s2, None, axis)
else:
for bc in ['natural', 'clamped']:
extra = {'bc_type': bc}
check_shape(ip, s1, s2, None, axis, extra)
def test_derivs_shapes():
for ip in [KroghInterpolator, BarycentricInterpolator]:
def interpolator_derivs(x, y, axis=0):
return ip(x, y, axis).derivatives
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
check_shape(interpolator_derivs, s1, s2, (6,), axis)
def test_deriv_shapes():
def krogh_deriv(x, y, axis=0):
return KroghInterpolator(x, y, axis).derivative
def bary_deriv(x, y, axis=0):
return BarycentricInterpolator(x, y, axis).derivative
def pchip_deriv(x, y, axis=0):
return pchip(x, y, axis).derivative()
def pchip_deriv2(x, y, axis=0):
return pchip(x, y, axis).derivative(2)
def pchip_antideriv(x, y, axis=0):
return pchip(x, y, axis).antiderivative()
def pchip_antideriv2(x, y, axis=0):
return pchip(x, y, axis).antiderivative(2)
def pchip_deriv_inplace(x, y, axis=0):
class P(PchipInterpolator):
def __call__(self, x):
return PchipInterpolator.__call__(self, x, 1)
return P(x, y, axis)
def akima_deriv(x, y, axis=0):
return Akima1DInterpolator(x, y, axis).derivative()
def akima_antideriv(x, y, axis=0):
return Akima1DInterpolator(x, y, axis).antiderivative()
def cspline_deriv(x, y, axis=0):
return CubicSpline(x, y, axis).derivative()
def cspline_antideriv(x, y, axis=0):
return CubicSpline(x, y, axis).antiderivative()
def bspl_deriv(x, y, axis=0):
return make_interp_spline(x, y, axis=axis).derivative()
def bspl_antideriv(x, y, axis=0):
return make_interp_spline(x, y, axis=axis).antiderivative()
for ip in [krogh_deriv, bary_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace,
pchip_antideriv, pchip_antideriv2, akima_deriv, akima_antideriv,
cspline_deriv, cspline_antideriv, bspl_deriv, bspl_antideriv]:
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
check_shape(ip, s1, s2, (), axis)
def test_complex():
x = [1, 2, 3, 4]
y = [1, 2, 1j, 3]
for ip in [KroghInterpolator, BarycentricInterpolator, CubicSpline]:
p = ip(x, y)
xp_assert_close(p(x), np.asarray(y))
dydx = [0, -1j, 2, 3j]
p = CubicHermiteSpline(x, y, dydx)
xp_assert_close(p(x), np.asarray(y))
xp_assert_close(p(x, 1), np.asarray(dydx))
class TestKrogh:
def setup_method(self):
self.true_poly = np.polynomial.Polynomial([-4, 5, 1, 3, -2])
self.test_xs = np.linspace(-1,1,100)
self.xs = np.linspace(-1,1,5)
self.ys = self.true_poly(self.xs)
def test_lagrange(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_scalar(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(7), P(7), check_0d=False)
assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)), check_0d=False)
def test_derivatives(self):
P = KroghInterpolator(self.xs,self.ys)
D = P.derivatives(self.test_xs)
for i in range(D.shape[0]):
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
D[i])
def test_low_derivatives(self):
P = KroghInterpolator(self.xs,self.ys)
D = P.derivatives(self.test_xs,len(self.xs)+2)
for i in range(D.shape[0]):
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
D[i])
def test_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
m = 10
r = P.derivatives(self.test_xs,m)
for i in range(m):
assert_almost_equal(P.derivative(self.test_xs,i),r[i])
def test_high_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
for i in range(len(self.xs), 2*len(self.xs)):
assert_almost_equal(P.derivative(self.test_xs,i),
np.zeros(len(self.test_xs)))
def test_ndim_derivatives(self):
poly1 = self.true_poly
poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
P = KroghInterpolator(self.xs, ys, axis=0)
D = P.derivatives(self.test_xs)
for i in range(D.shape[0]):
xp_assert_close(D[i],
np.stack((poly1.deriv(i)(self.test_xs),
poly2.deriv(i)(self.test_xs),
poly3.deriv(i)(self.test_xs)),
axis=-1))
def test_ndim_derivative(self):
poly1 = self.true_poly
poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
P = KroghInterpolator(self.xs, ys, axis=0)
for i in range(P.n):
xp_assert_close(P.derivative(self.test_xs, i),
np.stack((poly1.deriv(i)(self.test_xs),
poly2.deriv(i)(self.test_xs),
poly3.deriv(i)(self.test_xs)),
axis=-1))
def test_hermite(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_vector(self):
xs = [0, 1, 2]
ys = np.array([[0,1],[1,0],[2,1]])
P = KroghInterpolator(xs,ys)
Pi = [KroghInterpolator(xs,ys[:,i]) for i in range(ys.shape[1])]
test_xs = np.linspace(-1,3,100)
assert_almost_equal(P(test_xs),
np.asarray([p(test_xs) for p in Pi]).T)
assert_almost_equal(P.derivatives(test_xs),
np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
(1,2,0)))
def test_empty(self):
P = KroghInterpolator(self.xs,self.ys)
xp_assert_equal(P([]), np.asarray([]))
def test_shapes_scalarvalue(self):
P = KroghInterpolator(self.xs,self.ys)
assert np.shape(P(0)) == ()
assert np.shape(P(np.array(0))) == ()
assert np.shape(P([0])) == (1,)
assert np.shape(P([0,1])) == (2,)
def test_shapes_scalarvalue_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
n = P.n
assert np.shape(P.derivatives(0)) == (n,)
assert np.shape(P.derivatives(np.array(0))) == (n,)
assert np.shape(P.derivatives([0])) == (n, 1)
assert np.shape(P.derivatives([0, 1])) == (n, 2)
def test_shapes_vectorvalue(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
assert np.shape(P(0)) == (3,)
assert np.shape(P([0])) == (1, 3)
assert np.shape(P([0, 1])) == (2, 3)
def test_shapes_1d_vectorvalue(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,[1]))
assert np.shape(P(0)) == (1,)
assert np.shape(P([0])) == (1, 1)
assert np.shape(P([0,1])) == (2, 1)
def test_shapes_vectorvalue_derivative(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
n = P.n
assert np.shape(P.derivatives(0)) == (n, 3)
assert np.shape(P.derivatives([0])) == (n, 1, 3)
assert np.shape(P.derivatives([0,1])) == (n, 2, 3)
def test_wrapper(self):
P = KroghInterpolator(self.xs, self.ys)
ki = krogh_interpolate
assert_almost_equal(P(self.test_xs), ki(self.xs, self.ys, self.test_xs))
assert_almost_equal(P.derivative(self.test_xs, 2),
ki(self.xs, self.ys, self.test_xs, der=2))
assert_almost_equal(P.derivatives(self.test_xs, 2),
ki(self.xs, self.ys, self.test_xs, der=[0, 1]))
def test_int_inputs(self):
# Check input args are cast correctly to floats, gh-3669
x = [0, 234, 468, 702, 936, 1170, 1404, 2340, 3744, 6084, 8424,
13104, 60000]
offset_cdf = np.array([-0.95, -0.86114777, -0.8147762, -0.64072425,
-0.48002351, -0.34925329, -0.26503107,
-0.13148093, -0.12988833, -0.12979296,
-0.12973574, -0.08582937, 0.05])
f = KroghInterpolator(x, offset_cdf)
xp_assert_close(abs((f(x) - offset_cdf) / f.derivative(x, 1)),
np.zeros_like(offset_cdf), atol=1e-10)
def test_derivatives_complex(self):
# regression test for gh-7381: krogh.derivatives(0) fails complex y
x, y = np.array([-1, -1, 0, 1, 1]), np.array([1, 1.0j, 0, -1, 1.0j])
func = KroghInterpolator(x, y)
cmplx = func.derivatives(0)
cmplx2 = (KroghInterpolator(x, y.real).derivatives(0) +
1j*KroghInterpolator(x, y.imag).derivatives(0))
xp_assert_close(cmplx, cmplx2, atol=1e-15)
@pytest.mark.thread_unsafe
def test_high_degree_warning(self):
with pytest.warns(UserWarning, match="40 degrees provided,"):
KroghInterpolator(np.arange(40), np.ones(40))
@pytest.mark.thread_unsafe
def test_concurrency(self):
P = KroghInterpolator(self.xs, self.ys)
def worker_fn(_, interp):
interp(self.xs)
_run_concurrent_barrier(10, worker_fn, P)
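# A hedged illustration of the repeated-node (Hermite) convention relied on
# in test_derivatives_complex above: repeating an abscissa makes the next y
# entry specify the derivative at that node. Underscore-prefixed so pytest
# does not collect it.
def _krogh_hermite_sketch():
    xs = [0, 0, 1]                 # node 0 repeated: value, then derivative
    ys = [1, 2, 5]                 # p(0) = 1, p'(0) = 2, p(1) = 5
    p = KroghInterpolator(xs, ys)  # the quadratic 1 + 2*x + 2*x**2
    assert np.isclose(p(1), 5.0)
    assert np.isclose(p.derivative(0, 1), 2.0)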
class TestTaylor:
def test_exponential(self):
degree = 5
p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
for i in range(degree+1):
assert_almost_equal(p(0),1)
p = p.deriv()
assert_almost_equal(p(0),0)
class TestBarycentric:
def setup_method(self):
self.true_poly = np.polynomial.Polynomial([-4, 5, 1, 3, -2])
self.test_xs = np.linspace(-1, 1, 100)
self.xs = np.linspace(-1, 1, 5)
self.ys = self.true_poly(self.xs)
def test_lagrange(self):
# Ensure backwards compatibility post SPEC 7
P = BarycentricInterpolator(self.xs, self.ys, random_state=1)
xp_assert_close(P(self.test_xs), self.true_poly(self.test_xs))
def test_scalar(self):
P = BarycentricInterpolator(self.xs, self.ys, rng=1)
xp_assert_close(P(7), self.true_poly(7), check_0d=False)
xp_assert_close(P(np.array(7)), self.true_poly(np.array(7)), check_0d=False)
def test_derivatives(self):
P = BarycentricInterpolator(self.xs, self.ys)
D = P.derivatives(self.test_xs)
for i in range(D.shape[0]):
xp_assert_close(self.true_poly.deriv(i)(self.test_xs), D[i])
def test_low_derivatives(self):
P = BarycentricInterpolator(self.xs, self.ys)
D = P.derivatives(self.test_xs, len(self.xs)+2)
for i in range(D.shape[0]):
xp_assert_close(self.true_poly.deriv(i)(self.test_xs),
D[i],
atol=1e-12)
def test_derivative(self):
P = BarycentricInterpolator(self.xs, self.ys)
m = 10
r = P.derivatives(self.test_xs, m)
for i in range(m):
xp_assert_close(P.derivative(self.test_xs, i), r[i])
def test_high_derivative(self):
P = BarycentricInterpolator(self.xs, self.ys)
for i in range(len(self.xs), 5*len(self.xs)):
xp_assert_close(P.derivative(self.test_xs, i),
np.zeros(len(self.test_xs)))
def test_ndim_derivatives(self):
poly1 = self.true_poly
poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
P = BarycentricInterpolator(self.xs, ys, axis=0)
D = P.derivatives(self.test_xs)
for i in range(D.shape[0]):
xp_assert_close(D[i],
np.stack((poly1.deriv(i)(self.test_xs),
poly2.deriv(i)(self.test_xs),
poly3.deriv(i)(self.test_xs)),
axis=-1),
atol=1e-12)
def test_ndim_derivative(self):
poly1 = self.true_poly
poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
P = BarycentricInterpolator(self.xs, ys, axis=0)
for i in range(P.n):
xp_assert_close(P.derivative(self.test_xs, i),
np.stack((poly1.deriv(i)(self.test_xs),
poly2.deriv(i)(self.test_xs),
poly3.deriv(i)(self.test_xs)),
axis=-1),
atol=1e-12)
def test_delayed(self):
P = BarycentricInterpolator(self.xs)
P.set_yi(self.ys)
assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
def test_append(self):
P = BarycentricInterpolator(self.xs[:3], self.ys[:3])
P.add_xi(self.xs[3:], self.ys[3:])
assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
def test_vector(self):
xs = [0, 1, 2]
ys = np.array([[0, 1], [1, 0], [2, 1]])
BI = BarycentricInterpolator
P = BI(xs, ys)
Pi = [BI(xs, ys[:, i]) for i in range(ys.shape[1])]
test_xs = np.linspace(-1, 3, 100)
assert_almost_equal(P(test_xs),
np.asarray([p(test_xs) for p in Pi]).T)
def test_shapes_scalarvalue(self):
P = BarycentricInterpolator(self.xs, self.ys)
assert np.shape(P(0)) == ()
assert np.shape(P(np.array(0))) == ()
assert np.shape(P([0])) == (1,)
assert np.shape(P([0, 1])) == (2,)
def test_shapes_scalarvalue_derivative(self):
P = BarycentricInterpolator(self.xs,self.ys)
n = P.n
assert np.shape(P.derivatives(0)) == (n,)
assert np.shape(P.derivatives(np.array(0))) == (n,)
assert np.shape(P.derivatives([0])) == (n,1)
assert np.shape(P.derivatives([0,1])) == (n,2)
def test_shapes_vectorvalue(self):
P = BarycentricInterpolator(self.xs, np.outer(self.ys, np.arange(3)))
assert np.shape(P(0)) == (3,)
assert np.shape(P([0])) == (1, 3)
assert np.shape(P([0, 1])) == (2, 3)
def test_shapes_1d_vectorvalue(self):
P = BarycentricInterpolator(self.xs, np.outer(self.ys, [1]))
assert np.shape(P(0)) == (1,)
assert np.shape(P([0])) == (1, 1)
assert np.shape(P([0, 1])) == (2, 1)
def test_shapes_vectorvalue_derivative(self):
P = BarycentricInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
n = P.n
assert np.shape(P.derivatives(0)) == (n, 3)
assert np.shape(P.derivatives([0])) == (n, 1, 3)
assert np.shape(P.derivatives([0, 1])) == (n, 2, 3)
def test_wrapper(self):
P = BarycentricInterpolator(self.xs, self.ys, rng=1)
bi = barycentric_interpolate
xp_assert_close(P(self.test_xs), bi(self.xs, self.ys, self.test_xs, rng=1))
xp_assert_close(P.derivative(self.test_xs, 2),
bi(self.xs, self.ys, self.test_xs, der=2, rng=1))
xp_assert_close(P.derivatives(self.test_xs, 2),
bi(self.xs, self.ys, self.test_xs, der=[0, 1], rng=1))
def test_int_input(self):
x = 1000 * np.arange(1, 11) # np.prod(x[-1] - x[:-1]) overflows
y = np.arange(1, 11)
value = barycentric_interpolate(x, y, 1000 * 9.5)
assert_almost_equal(value, np.asarray(9.5))
def test_large_chebyshev(self):
# The weights for Chebyshev points of the second kind are known in
# closed form. Naive calculation of the barycentric weights fails for
# large N because of numerical underflow and overflow, so we test
# correctness for large N against the analytical Chebyshev weights.
# Without capacity scaling or permutation, n=800 fails.
# With just capacity scaling, n=1097 fails.
# With both capacity scaling and random permutation, n=30000 succeeds.
n = 1100
j = np.arange(n + 1).astype(np.float64)
x = np.cos(j * np.pi / n)
# See page 506 of Berrut and Trefethen 2004 for this formula
w = (-1) ** j
w[0] *= 0.5
w[-1] *= 0.5
P = BarycentricInterpolator(x)
# It's okay to have a constant scaling factor in the weights because it
# cancels out in the evaluation of the polynomial.
factor = P.wi[0]
assert_almost_equal(P.wi / (2 * factor), w)
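# A hedged sketch of the failure mode described above (an illustrative
# helper added here, not part of the original suite): the naive weight
# denominator prod_{k != j}(x_j - x_k) is of order n / 2**(n - 1) for
# Chebyshev points, which for n = 1100 lies below the smallest subnormal
# float64, so it underflows to 0.0 and the naive weight overflows to inf.
def _naive_weight_underflow_demo(self, n=1100):
    j = np.arange(n + 1).astype(np.float64)
    x = np.cos(j * np.pi / n)
    d = np.prod(x[0] - x[1:])  # underflows to 0.0
    with np.errstate(divide='ignore'):
        return d, 1.0 / d  # (0.0, inf)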
def test_warning(self):
# Test that the divide-by-zero warning is properly suppressed when the
# evaluation points coincide with the interpolation points
P = BarycentricInterpolator([0, 1], [1, 2])
with np.errstate(divide='raise'):
yi = P(P.xi)
# Check if the interpolated values match the input values
# at the nodes
assert_almost_equal(yi, P.yi.ravel())
@pytest.mark.thread_unsafe
def test_repeated_node(self):
# check that a repeated node raises a ValueError
# (computing the weights requires division by xi[i] - xi[j])
xis = np.array([0.1, 0.5, 0.9, 0.5])
ys = np.array([1, 2, 3, 4])
with pytest.raises(ValueError,
match="Interpolation points xi must be distinct."):
BarycentricInterpolator(xis, ys)
@pytest.mark.thread_unsafe
def test_concurrency(self):
P = BarycentricInterpolator(self.xs, self.ys)
def worker_fn(_, interp):
interp(self.xs)
_run_concurrent_barrier(10, worker_fn, P)
class TestPCHIP:
def _make_random(self, npts=20):
rng = np.random.RandomState(1234)
xi = np.sort(rng.random(npts))
yi = rng.random(npts)
return pchip(xi, yi), xi, yi
def test_overshoot(self):
# PCHIP should not overshoot
p, xi, yi = self._make_random()
for i in range(len(xi)-1):
x1, x2 = xi[i], xi[i+1]
y1, y2 = yi[i], yi[i+1]
if y1 > y2:
y1, y2 = y2, y1
xp = np.linspace(x1, x2, 10)
yp = p(xp)
assert ((y1 <= yp + 1e-15) & (yp <= y2 + 1e-15)).all()
def test_monotone(self):
# PCHIP should preserve monotonicity
p, xi, yi = self._make_random()
for i in range(len(xi)-1):
x1, x2 = xi[i], xi[i+1]
y1, y2 = yi[i], yi[i+1]
xp = np.linspace(x1, x2, 10)
yp = p(xp)
assert ((y2-y1) * (yp[1:] - yp[:-1]) > 0).all()
def test_cast(self):
# regression test for integer input data, see gh-3453
data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100],
[-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
xx = np.arange(100)
curve = pchip(data[0], data[1])(xx)
data1 = data * 1.0
curve1 = pchip(data1[0], data1[1])(xx)
xp_assert_close(curve, curve1, atol=1e-14, rtol=1e-14)
def test_nag(self):
# Example from NAG C implementation,
# http://nag.com/numeric/cl/nagdoc_cl25/html/e01/e01bec.html
# suggested in gh-5326 as a smoke test for the way the derivatives
# are computed (see also gh-3453)
dataStr = '''
7.99 0.00000E+0
8.09 0.27643E-4
8.19 0.43750E-1
8.70 0.16918E+0
9.20 0.46943E+0
10.00 0.94374E+0
12.00 0.99864E+0
15.00 0.99992E+0
20.00 0.99999E+0
'''
data = np.loadtxt(io.StringIO(dataStr))
pch = pchip(data[:,0], data[:,1])
resultStr = '''
7.9900 0.0000
9.1910 0.4640
10.3920 0.9645
11.5930 0.9965
12.7940 0.9992
13.9950 0.9998
15.1960 0.9999
16.3970 1.0000
17.5980 1.0000
18.7990 1.0000
20.0000 1.0000
'''
result = np.loadtxt(io.StringIO(resultStr))
xp_assert_close(result[:,1], pch(result[:,0]), rtol=0., atol=5e-5)
def test_endslopes(self):
# this is a smoke test for gh-3453: PCHIP interpolator should not
# set edge slopes to zero if the data do not suggest zero edge derivatives
x = np.array([0.0, 0.1, 0.25, 0.35])
y1 = np.array([279.35, 0.5e3, 1.0e3, 2.5e3])
y2 = np.array([279.35, 2.5e3, 1.50e3, 1.0e3])
for pp in (pchip(x, y1), pchip(x, y2)):
for t in (x[0], x[-1]):
assert pp(t, 1) != 0
@pytest.mark.thread_unsafe
def test_all_zeros(self):
x = np.arange(10)
y = np.zeros_like(x)
# this should work and not generate any warnings
with warnings.catch_warnings():
warnings.filterwarnings('error')
pch = pchip(x, y)
xx = np.linspace(0, 9, 101)
assert all(pch(xx) == 0.)
def test_two_points(self):
# regression test for gh-6222: pchip([0, 1], [0, 1]) fails because
# it tries to use a three-point scheme to estimate edge derivatives,
# while there are only two points available.
# Instead, it should construct a linear interpolator.
x = np.linspace(0, 1, 11)
p = pchip([0, 1], [0, 2])
xp_assert_close(p(x), 2*x, atol=1e-15)
def test_pchip_interpolate(self):
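# For the collinear data below, pchip reproduces the straight line
# y = x + 3 exactly, so extrapolating to x = 0.5 gives the value 3.5
# (der=0) and the slope 1 (der=1).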
assert_array_almost_equal(
pchip_interpolate([1, 2, 3], [4, 5, 6], [0.5], der=1),
np.asarray([1.]))
assert_array_almost_equal(
pchip_interpolate([1, 2, 3], [4, 5, 6], [0.5], der=0),
np.asarray([3.5]))
assert_array_almost_equal(
np.asarray(pchip_interpolate([1, 2, 3], [4, 5, 6], [0.5], der=[0, 1])),
np.asarray([[3.5], [1]]))
def test_roots(self):
# regression test for gh-6357: .roots method should work
p = pchip([0, 1], [-1, 1])
r = p.roots()
xp_assert_close(r, np.asarray([0.5]))
class TestCubicSpline:
@staticmethod
def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',
tol=1e-14):
"""Check that spline coefficients satisfy the continuity and boundary
conditions."""
x = S.x
c = S.c
dx = np.diff(x)
dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))
dxi = dx[:-1]
# Check C2 continuity.
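# PPoly convention: on interval i the spline is c[0, i]*t**3 +
# c[1, i]*t**2 + c[2, i]*t + c[3, i] with t = x - x[i], so matching the
# value, first and second derivatives at t = dx[i] against the next
# interval's coefficients at t = 0 is exactly the C2 requirement.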
xp_assert_close(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +
c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)
xp_assert_close(c[2, 1:], 3 * c[0, :-1] * dxi**2 +
2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)
xp_assert_close(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],
rtol=tol, atol=tol)
# Check that we found a parabola, the third derivative is 0.
if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':
xp_assert_close(c[0], np.zeros_like(c[0]), rtol=tol, atol=tol)
return
# Check periodic boundary conditions.
if bc_start == 'periodic':
xp_assert_close(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)
xp_assert_close(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)
xp_assert_close(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)
return
# Check other boundary conditions.
if bc_start == 'not-a-knot':
if x.size == 2:
slope = (S(x[1]) - S(x[0])) / dx[0]
slope = np.asarray(slope)
xp_assert_close(S(x[0], 1), slope, rtol=tol, atol=tol)
else:
xp_assert_close(c[0, 0], c[0, 1], rtol=tol, atol=tol)
elif bc_start == 'clamped':
xp_assert_close(
S(x[0], 1), np.zeros_like(S(x[0], 1)), rtol=tol, atol=tol)
elif bc_start == 'natural':
xp_assert_close(
S(x[0], 2), np.zeros_like(S(x[0], 2)), rtol=tol, atol=tol)
else:
order, value = bc_start
xp_assert_close(S(x[0], order), np.asarray(value), rtol=tol, atol=tol)
if bc_end == 'not-a-knot':
if x.size == 2:
slope = (S(x[1]) - S(x[0])) / dx[0]
slope = np.asarray(slope)
xp_assert_close(S(x[1], 1), slope, rtol=tol, atol=tol)
else:
xp_assert_close(c[0, -1], c[0, -2], rtol=tol, atol=tol)
elif bc_end == 'clamped':
xp_assert_close(S(x[-1], 1), np.zeros_like(S(x[-1], 1)),
rtol=tol, atol=tol)
elif bc_end == 'natural':
xp_assert_close(S(x[-1], 2), np.zeros_like(S(x[-1], 2)),
rtol=2*tol, atol=2*tol)
else:
order, value = bc_end
xp_assert_close(S(x[-1], order), np.asarray(value), rtol=tol, atol=tol)
def check_all_bc(self, x, y, axis):
deriv_shape = list(y.shape)
del deriv_shape[axis]
first_deriv = np.empty(deriv_shape)
first_deriv.fill(2)
second_deriv = np.empty(deriv_shape)
second_deriv.fill(-1)
bc_all = [
'not-a-knot',
'natural',
'clamped',
(1, first_deriv),
(2, second_deriv)
]
for bc in bc_all[:3]:
S = CubicSpline(x, y, axis=axis, bc_type=bc)
self.check_correctness(S, bc, bc)
for bc_start in bc_all:
for bc_end in bc_all:
S = CubicSpline(x, y, axis=axis, bc_type=(bc_start, bc_end))
self.check_correctness(S, bc_start, bc_end, tol=2e-14)
def test_general(self):
x = np.array([-1, 0, 0.5, 2, 4, 4.5, 5.5, 9])
y = np.array([0, -0.5, 2, 3, 2.5, 1, 1, 0.5])
for n in [2, 3, x.size]:
self.check_all_bc(x[:n], y[:n], 0)
Y = np.empty((2, n, 2))
Y[0, :, 0] = y[:n]
Y[0, :, 1] = y[:n] - 1
Y[1, :, 0] = y[:n] + 2
Y[1, :, 1] = y[:n] + 3
self.check_all_bc(x[:n], Y, 1)
def test_periodic(self):
for n in [2, 3, 5]:
x = np.linspace(0, 2 * np.pi, n)
y = np.cos(x)
S = CubicSpline(x, y, bc_type='periodic')
self.check_correctness(S, 'periodic', 'periodic')
Y = np.empty((2, n, 2))
Y[0, :, 0] = y
Y[0, :, 1] = y + 2
Y[1, :, 0] = y - 1
Y[1, :, 1] = y + 5
S = CubicSpline(x, Y, axis=1, bc_type='periodic')
self.check_correctness(S, 'periodic', 'periodic')
def test_periodic_eval(self):
x = np.linspace(0, 2 * np.pi, 10)
y = np.cos(x)
S = CubicSpline(x, y, bc_type='periodic')
assert_almost_equal(S(1), S(1 + 2 * np.pi), decimal=15)
def test_second_derivative_continuity_gh_11758(self):
# gh-11758: C2 continuity fail
x = np.array([0.9, 1.3, 1.9, 2.1, 2.6, 3.0, 3.9, 4.4, 4.7, 5.0, 6.0,
7.0, 8.0, 9.2, 10.5, 11.3, 11.6, 12.0, 12.6, 13.0, 13.3])
y = np.array([1.3, 1.5, 1.85, 2.1, 2.6, 2.7, 2.4, 2.15, 2.05, 2.1,
2.25, 2.3, 2.25, 1.95, 1.4, 0.9, 0.7, 0.6, 0.5, 0.4, 1.3])
S = CubicSpline(x, y, bc_type='periodic', extrapolate='periodic')
self.check_correctness(S, 'periodic', 'periodic')
def test_three_points(self):
# gh-11758: Fails computing a_m2_m1
# In this case, s (the first derivatives) can be found manually by
# solving a system of 2 linear equations. The solution of that system is
# s[i] = (h1*m2 + h2*m1) / (h1 + h2), where h1 = x[1] - x[0],
# h2 = x[2] - x[1], m1 = (y[1] - y[0]) / h1 and m2 = (y[2] - y[1]) / h2.
x = np.array([1.0, 2.75, 3.0])
y = np.array([1.0, 15.0, 1.0])
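# Worked check for these data: h1 = 1.75, h2 = 0.25,
# m1 = 14/1.75 = 8, m2 = -14/0.25 = -56, so
# s = (1.75*(-56) + 0.25*8) / 2.0 = -48 at every node,
# which is the derivative value asserted below.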
S = CubicSpline(x, y, bc_type='periodic')
self.check_correctness(S, 'periodic', 'periodic')
xp_assert_close(S.derivative(1)(x), np.array([-48.0, -48.0, -48.0]))
def test_periodic_three_points_multidim(self):
# make sure one multidimensional interpolator does the same as multiple
# one-dimensional interpolators
x = np.array([0.0, 1.0, 3.0])
y = np.array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
S = CubicSpline(x, y, bc_type="periodic")
self.check_correctness(S, 'periodic', 'periodic')
S0 = CubicSpline(x, y[:, 0], bc_type="periodic")
S1 = CubicSpline(x, y[:, 1], bc_type="periodic")
q = np.linspace(0, 2, 5)
xp_assert_close(S(q)[:, 0], S0(q))
xp_assert_close(S(q)[:, 1], S1(q))
def test_dtypes(self):
x = np.array([0, 1, 2, 3], dtype=int)
y = np.array([-5, 2, 3, 1], dtype=int)
S = CubicSpline(x, y)
self.check_correctness(S)
y = np.array([-1+1j, 0.0, 1-1j, 0.5-1.5j])
S = CubicSpline(x, y)
self.check_correctness(S)
S = CubicSpline(x, x ** 3, bc_type=("natural", (1, 2j)))
self.check_correctness(S, "natural", (1, 2j))
y = np.array([-5, 2, 3, 1])
S = CubicSpline(x, y, bc_type=[(1, 2 + 0.5j), (2, 0.5 - 1j)])
self.check_correctness(S, (1, 2 + 0.5j), (2, 0.5 - 1j))
def test_small_dx(self):
rng = np.random.RandomState(0)
x = np.sort(rng.uniform(size=100))
y = 1e4 + rng.uniform(size=100)
S = CubicSpline(x, y)
self.check_correctness(S, tol=1e-13)
def test_incorrect_inputs(self):
x = np.array([1, 2, 3, 4])
y = np.array([1, 2, 3, 4])
xc = np.array([1 + 1j, 2, 3, 4])
xn = np.array([np.nan, 2, 3, 4])
xo = np.array([2, 1, 3, 4])
yn = np.array([np.nan, 2, 3, 4])
y3 = [1, 2, 3]
x1 = [1]
y1 = [1]
assert_raises(ValueError, CubicSpline, xc, y)
assert_raises(ValueError, CubicSpline, xn, y)
assert_raises(ValueError, CubicSpline, x, yn)
assert_raises(ValueError, CubicSpline, xo, y)
assert_raises(ValueError, CubicSpline, x, y3)
assert_raises(ValueError, CubicSpline, x[:, np.newaxis], y)
assert_raises(ValueError, CubicSpline, x1, y1)
wrong_bc = [('periodic', 'clamped'),
((2, 0), (3, 10)),
((1, 0), ),
(0., 0.),
'not-a-typo']
for bc_type in wrong_bc:
assert_raises(ValueError, CubicSpline, x, y, 0, bc_type, True)
# Shapes mismatch when giving arbitrary derivative values:
Y = np.c_[y, y]
bc1 = ('clamped', (1, 0))
bc2 = ('clamped', (1, [0, 0, 0]))
bc3 = ('clamped', (1, [[0, 0]]))
assert_raises(ValueError, CubicSpline, x, Y, 0, bc1, True)
assert_raises(ValueError, CubicSpline, x, Y, 0, bc2, True)
assert_raises(ValueError, CubicSpline, x, Y, 0, bc3, True)
# periodic condition, y[-1] must be equal to y[0]:
assert_raises(ValueError, CubicSpline, x, y, 0, 'periodic', True)
def test_CubicHermiteSpline_correctness():
x = [0, 2, 7]
y = [-1, 2, 3]
dydx = [0, 3, 7]
s = CubicHermiteSpline(x, y, dydx)
xp_assert_close(s(x), y, check_shape=False, check_dtype=False, rtol=1e-15)
xp_assert_close(s(x, 1), dydx, check_shape=False, check_dtype=False, rtol=1e-15)
def test_CubicHermiteSpline_error_handling():
x = [1, 2, 3]
y = [0, 3, 5]
dydx = [1, -1, 2, 3]
assert_raises(ValueError, CubicHermiteSpline, x, y, dydx)
dydx_with_nan = [1, 0, np.nan]
assert_raises(ValueError, CubicHermiteSpline, x, y, dydx_with_nan)
def test_roots_extrapolate_gh_11185():
x = np.array([0.001, 0.002])
y = np.array([1.66066935e-06, 1.10410807e-06])
dy = np.array([-1.60061854, -1.600619])
p = CubicHermiteSpline(x, y, dy)
# roots(extrapolate=True) for a polynomial with a single interval
# should return all three real roots
r = p.roots(extrapolate=True)
assert p.c.shape[1] == 1
assert r.size == 3
class TestZeroSizeArrays:
# regression tests for gh-17241: CubicSpline et al. must not segfault
# when y.size == 0
# The two methods below are _almost_ the same, but not quite:
# one is for objects which have the `bc_type` argument (CubicSpline)
# and the other one is for those which do not (Pchip, Akima1D)
@pytest.mark.parametrize('y', [np.zeros((10, 0, 5)),
np.zeros((10, 5, 0))])
@pytest.mark.parametrize('bc_type',
['not-a-knot', 'periodic', 'natural', 'clamped'])
@pytest.mark.parametrize('axis', [0, 1, 2])
@pytest.mark.parametrize('cls', [make_interp_spline, CubicSpline])
def test_zero_size(self, cls, y, bc_type, axis):
x = np.arange(10)
xval = np.arange(3)
obj = cls(x, y, bc_type=bc_type)
assert obj(xval).size == 0
assert obj(xval).shape == xval.shape + y.shape[1:]
# Also check with an explicit non-default axis
yt = np.moveaxis(y, 0, axis) # (10, 0, 5) --> (0, 10, 5) if axis=1 etc
obj = cls(x, yt, bc_type=bc_type, axis=axis)
sh = yt.shape[:axis] + (xval.size, ) + yt.shape[axis+1:]
assert obj(xval).size == 0
assert obj(xval).shape == sh
@pytest.mark.parametrize('y', [np.zeros((10, 0, 5)),
np.zeros((10, 5, 0))])
@pytest.mark.parametrize('axis', [0, 1, 2])
@pytest.mark.parametrize('cls', [PchipInterpolator, Akima1DInterpolator])
def test_zero_size_2(self, cls, y, axis):
x = np.arange(10)
xval = np.arange(3)
obj = cls(x, y)
assert obj(xval).size == 0
assert obj(xval).shape == xval.shape + y.shape[1:]
# Also check with an explicit non-default axis
yt = np.moveaxis(y, 0, axis) # (10, 0, 5) --> (0, 10, 5) if axis=1 etc
obj = cls(x, yt, axis=axis)
sh = yt.shape[:axis] + (xval.size, ) + yt.shape[axis+1:]
assert obj(xval).size == 0
assert obj(xval).shape == sh

View file

@@ -0,0 +1,246 @@
# Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
import numpy as np
from scipy._lib._array_api import assert_array_almost_equal, assert_almost_equal
from numpy import linspace, sin, cos, exp, allclose
from scipy.interpolate._rbf import Rbf
from scipy._lib._testutils import _run_concurrent_barrier
import pytest
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0], check_0d=False)
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
rng = np.random.RandomState(1234)
x = rng.rand(50, 1)*4 - 2
y = rng.rand(50, 1)*4 - 2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
rng = np.random.RandomState(1234)
x = rng.rand(50, 1)*4 - 2
y = rng.rand(50, 1)*4 - 2
z = rng.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
check_rbf1d_interpolation(function)
check_rbf2d_interpolation(function)
check_rbf3d_interpolation(function)
def check_2drbf1d_interpolation(function):
# Check that the 2-D Rbf function interpolates through the nodes (1D)
x = linspace(0, 10, 9)
y0 = sin(x)
y1 = cos(x)
y = np.vstack([y0, y1]).T
rbf = Rbf(x, y, function=function, mode='N-D')
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_2drbf2d_interpolation(function):
# Check that the 2-D Rbf function interpolates through the nodes (2D).
rng = np.random.RandomState(1234)
x = rng.rand(50, ) * 4 - 2
y = rng.rand(50, ) * 4 - 2
z0 = x * exp(-x ** 2 - 1j * y ** 2)
z1 = y * exp(-y ** 2 - 1j * x ** 2)
z = np.vstack([z0, z1]).T
rbf = Rbf(x, y, z, epsilon=2, function=function, mode='N-D')
zi = rbf(x, y)
zi.shape = z.shape
assert_array_almost_equal(z, zi)
def check_2drbf3d_interpolation(function):
# Check that the 2-D Rbf function interpolates through the nodes (3D).
rng = np.random.RandomState(1234)
x = rng.rand(50, ) * 4 - 2
y = rng.rand(50, ) * 4 - 2
z = rng.rand(50, ) * 4 - 2
d0 = x * exp(-x ** 2 - y ** 2)
d1 = y * exp(-y ** 2 - x ** 2)
d = np.vstack([d0, d1]).T
rbf = Rbf(x, y, z, d, epsilon=2, function=function, mode='N-D')
di = rbf(x, y, z)
di.shape = d.shape
assert_array_almost_equal(di, d)
def test_2drbf_interpolation():
for function in FUNCTIONS:
check_2drbf1d_interpolation(function)
check_2drbf2d_interpolation(function)
check_2drbf3d_interpolation(function)
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
msg = f"abs-diff: {abs(yi - sin(xi)).max():f}"
assert allclose(yi, sin(xi), atol=atol), msg
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
check_rbf1d_regularity(function, tolerances.get(function, 1e-2))
def check_2drbf1d_regularity(function, atol):
# Check that the 2-D Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y0 = sin(x)
y1 = cos(x)
y = np.vstack([y0, y1]).T
rbf = Rbf(x, y, function=function, mode='N-D')
xi = linspace(0, 10, 100)
yi = rbf(xi)
msg = f"abs-diff: {abs(yi - np.vstack([sin(xi), cos(xi)]).T).max():f}"
assert allclose(yi, np.vstack([sin(xi), cos(xi)]).T, atol=atol), msg
def test_2drbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.15,
'linear': 0.2
}
for function in FUNCTIONS:
check_2drbf1d_regularity(function, tolerances.get(function, 1e-2))
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
rng = np.random.RandomState(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * rng.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
# subtract the linear trend and make sure there are no spikes: the
# interpolant's deviation from the trend must not exceed the data's own
# deviation by more than 10%
assert np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1
def test_rbf_stability():
for function in FUNCTIONS:
check_rbf1d_stability(function)
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
x = linspace(0, 10, 9)
y = sin(x)
def linfunc(x):
return x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
Rbf(x, y, epsilon=None)
def test_rbf_epsilon_none_collinear():
# Check that collinear points in one dimension don't cause an error
# due to epsilon = 0
x = [1, 2, 3]
y = [4, 4, 4]
z = [5, 6, 7]
rbf = Rbf(x, y, z, epsilon=None)
assert rbf.epsilon > 0
@pytest.mark.thread_unsafe
def test_rbf_concurrency():
x = linspace(0, 10, 100)
y0 = sin(x)
y1 = cos(x)
y = np.vstack([y0, y1]).T
rbf = Rbf(x, y, mode='N-D')
def worker_fn(_, interp, xp):
interp(xp)
_run_concurrent_barrier(10, worker_fn, rbf, x)

View file

@@ -0,0 +1,534 @@
import pickle
import pytest
import numpy as np
from numpy.linalg import LinAlgError
from scipy._lib._array_api import xp_assert_close
from scipy.stats.qmc import Halton
from scipy.spatial import cKDTree # type: ignore[attr-defined]
from scipy.interpolate._rbfinterp import (
_AVAILABLE, _SCALE_INVARIANT, _NAME_TO_MIN_DEGREE, _monomial_powers,
RBFInterpolator
)
from scipy.interpolate import _rbfinterp_pythran
from scipy._lib._testutils import _run_concurrent_barrier
def _vandermonde(x, degree):
# Returns a matrix of monomials that span polynomials with the specified
# degree evaluated at x.
powers = _monomial_powers(x.shape[1], degree)
return _rbfinterp_pythran._polynomial_matrix(x, powers)
def _1d_test_function(x):
# Test function used in Wahba's "Spline Models for Observational Data".
# domain ~= (0, 3), range ~= (-1.0, 0.2)
x = x[:, 0]
y = 4.26*(np.exp(-x) - 4*np.exp(-2*x) + 3*np.exp(-3*x))
return y
def _2d_test_function(x):
# Franke's test function.
# domain ~= (0, 1) X (0, 1), range ~= (0.0, 1.2)
x1, x2 = x[:, 0], x[:, 1]
term1 = 0.75 * np.exp(-(9*x1-2)**2/4 - (9*x2-2)**2/4)
term2 = 0.75 * np.exp(-(9*x1+1)**2/49 - (9*x2+1)/10)
term3 = 0.5 * np.exp(-(9*x1-7)**2/4 - (9*x2-3)**2/4)
term4 = -0.2 * np.exp(-(9*x1-4)**2 - (9*x2-7)**2)
y = term1 + term2 + term3 + term4
return y
def _is_conditionally_positive_definite(kernel, m):
# Tests whether the kernel is conditionally positive definite of order m.
# See chapter 7 of Fasshauer's "Meshfree Approximation Methods with
# MATLAB".
nx = 10
ntests = 100
for ndim in [1, 2, 3, 4, 5]:
# Generate sample points with a Halton sequence to avoid samples that
# are too close to each other, which can make the matrix singular.
seq = Halton(ndim, scramble=False, seed=np.random.RandomState())
for _ in range(ntests):
x = 2*seq.random(nx) - 1
A = _rbfinterp_pythran._kernel_matrix(x, kernel)
P = _vandermonde(x, m - 1)
Q, R = np.linalg.qr(P, mode='complete')
# Q2 forms a basis for the space of vectors v with P.T @ v = 0. Project
# A onto this space, and then check whether it is positive definite
# using the Cholesky decomposition. If not, then the kernel is not
# c.p.d. of order m.
Q2 = Q[:, P.shape[1]:]
B = Q2.T.dot(A).dot(Q2)
try:
np.linalg.cholesky(B)
except np.linalg.LinAlgError:
return False
return True
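# Condensed, hedged sketch of the projection idea above (an illustration
# added here, not used by the suite): the linear kernel phi(r) = -r is
# c.p.d. of order 1, so projecting its kernel matrix onto the complement
# of the constant vector must yield a positive definite matrix.
def _cpd_projection_sketch(npts=10):
    x = np.linspace(0.0, 1.0, npts)[:, None]
    A = -np.abs(x - x.T)               # kernel matrix for phi(r) = -r
    P = np.ones((npts, 1))             # degree-0 polynomial basis
    Q, _ = np.linalg.qr(P, mode='complete')
    Q2 = Q[:, 1:]                      # basis orthogonal to span(P)
    np.linalg.cholesky(Q2.T @ A @ Q2)  # succeeds: -r is c.p.d. of order 1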
# Sorting the parametrize arguments is necessary to avoid a parallelization
# issue described here: https://github.com/pytest-dev/pytest-xdist/issues/432.
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
def test_conditionally_positive_definite(kernel):
# Test if each kernel in _AVAILABLE is conditionally positive definite of
# order m, where m comes from _NAME_TO_MIN_DEGREE. This is a necessary
# condition for the smoothed RBF interpolant to be well-posed in general.
m = _NAME_TO_MIN_DEGREE.get(kernel, -1) + 1
assert _is_conditionally_positive_definite(kernel, m)
class _TestRBFInterpolator:
@pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
def test_scale_invariance_1d(self, kernel):
# Verify that the functions in _SCALE_INVARIANT are insensitive to the
# shape parameter (when smoothing == 0) in 1d.
seq = Halton(1, scramble=False, seed=np.random.RandomState())
x = 3*seq.random(50)
y = _1d_test_function(x)
xitp = 3*seq.random(50)
yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
xp_assert_close(yitp1, yitp2, atol=1e-8)
@pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
def test_scale_invariance_2d(self, kernel):
# Verify that the functions in _SCALE_INVARIANT are insensitive to the
# shape parameter (when smoothing == 0) in 2d.
seq = Halton(2, scramble=False, seed=np.random.RandomState())
x = seq.random(100)
y = _2d_test_function(x)
xitp = seq.random(100)
yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
xp_assert_close(yitp1, yitp2, atol=1e-8)
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
def test_extreme_domains(self, kernel):
# Make sure the interpolant remains numerically stable for very
# large/small domains.
seq = Halton(2, scramble=False, seed=np.random.RandomState())
scale = 1e50
shift = 1e55
x = seq.random(100)
y = _2d_test_function(x)
xitp = seq.random(100)
if kernel in _SCALE_INVARIANT:
yitp1 = self.build(x, y, kernel=kernel)(xitp)
yitp2 = self.build(
x*scale + shift, y,
kernel=kernel
)(xitp*scale + shift)
else:
yitp1 = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
yitp2 = self.build(
x*scale + shift, y,
epsilon=5.0/scale,
kernel=kernel
)(xitp*scale + shift)
xp_assert_close(yitp1, yitp2, atol=1e-8)
def test_polynomial_reproduction(self):
# If the observed data comes from a polynomial, then the interpolant
# should be able to reproduce the polynomial exactly, provided that
# `degree` is sufficiently high.
rng = np.random.RandomState(0)
seq = Halton(2, scramble=False, seed=rng)
degree = 3
x = seq.random(50)
xitp = seq.random(50)
P = _vandermonde(x, degree)
Pitp = _vandermonde(xitp, degree)
poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
y = P.dot(poly_coeffs)
yitp1 = Pitp.dot(poly_coeffs)
yitp2 = self.build(x, y, degree=degree)(xitp)
xp_assert_close(yitp1, yitp2, atol=1e-8)
@pytest.mark.slow
def test_chunking(self, monkeypatch):
# If the observed data comes from a polynomial, then the interpolant
# should be able to reproduce the polynomial exactly, provided that
# `degree` is sufficiently high.
rng = np.random.RandomState(0)
seq = Halton(2, scramble=False, seed=rng)
degree = 3
largeN = 1000 + 33
# xitp is made large so that the chunked evaluation path of
# RBFInterpolator is exercised
x = seq.random(50)
xitp = seq.random(largeN)
P = _vandermonde(x, degree)
Pitp = _vandermonde(xitp, degree)
poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
y = P.dot(poly_coeffs)
yitp1 = Pitp.dot(poly_coeffs)
interp = self.build(x, y, degree=degree)
ce_real = interp._chunk_evaluator
def _chunk_evaluator(*args, **kwargs):
kwargs.update(memory_budget=100)
return ce_real(*args, **kwargs)
monkeypatch.setattr(interp, '_chunk_evaluator', _chunk_evaluator)
yitp2 = interp(xitp)
xp_assert_close(yitp1, yitp2, atol=1e-8)
def test_vector_data(self):
# Make sure interpolating a vector field is the same as interpolating
# each component separately.
seq = Halton(2, scramble=False, seed=np.random.RandomState())
x = seq.random(100)
xitp = seq.random(100)
y = np.array([_2d_test_function(x),
_2d_test_function(x[:, ::-1])]).T
yitp1 = self.build(x, y)(xitp)
yitp2 = self.build(x, y[:, 0])(xitp)
yitp3 = self.build(x, y[:, 1])(xitp)
xp_assert_close(yitp1[:, 0], yitp2)
xp_assert_close(yitp1[:, 1], yitp3)
def test_complex_data(self):
# Interpolating complex input should be the same as interpolating the
# real and complex components.
seq = Halton(2, scramble=False, seed=np.random.RandomState())
x = seq.random(100)
xitp = seq.random(100)
y = _2d_test_function(x) + 1j*_2d_test_function(x[:, ::-1])
yitp1 = self.build(x, y)(xitp)
yitp2 = self.build(x, y.real)(xitp)
yitp3 = self.build(x, y.imag)(xitp)
xp_assert_close(yitp1.real, yitp2)
xp_assert_close(yitp1.imag, yitp3)
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
def test_interpolation_misfit_1d(self, kernel):
# Make sure that each kernel, with its default `degree` and an
# appropriate `epsilon`, does a good job at interpolation in 1d.
seq = Halton(1, scramble=False, seed=np.random.RandomState())
x = 3*seq.random(50)
xitp = 3*seq.random(50)
y = _1d_test_function(x)
ytrue = _1d_test_function(xitp)
yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
mse = np.mean((yitp - ytrue)**2)
assert mse < 1.0e-4
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
def test_interpolation_misfit_2d(self, kernel):
# Make sure that each kernel, with its default `degree` and an
# appropriate `epsilon`, does a good job at interpolation in 2d.
seq = Halton(2, scramble=False, seed=np.random.RandomState())
x = seq.random(100)
xitp = seq.random(100)
y = _2d_test_function(x)
ytrue = _2d_test_function(xitp)
yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
mse = np.mean((yitp - ytrue)**2)
assert mse < 2.0e-4
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
def test_smoothing_misfit(self, kernel):
# Make sure we can find a smoothing parameter for each kernel that
# removes a sufficient amount of noise.
rng = np.random.RandomState(0)
seq = Halton(1, scramble=False, seed=rng)
noise = 0.2
rmse_tol = 0.1
smoothing_range = 10**np.linspace(-4, 1, 20)
x = 3*seq.random(100)
y = _1d_test_function(x) + rng.normal(0.0, noise, (100,))
ytrue = _1d_test_function(x)
rmse_within_tol = False
for smoothing in smoothing_range:
ysmooth = self.build(
x, y,
epsilon=1.0,
smoothing=smoothing,
kernel=kernel)(x)
rmse = np.sqrt(np.mean((ysmooth - ytrue)**2))
if rmse < rmse_tol:
rmse_within_tol = True
break
assert rmse_within_tol
def test_array_smoothing(self):
# Test using an array for `smoothing` to give less weight to a known
# outlier.
rng = np.random.RandomState(0)
seq = Halton(1, scramble=False, seed=rng)
degree = 2
x = seq.random(50)
P = _vandermonde(x, degree)
poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
y = P.dot(poly_coeffs)
y_with_outlier = np.copy(y)
y_with_outlier[10] += 1.0
smoothing = np.zeros((50,))
smoothing[10] = 1000.0
yitp = self.build(x, y_with_outlier, smoothing=smoothing)(x)
# Should be able to reproduce the uncorrupted data almost exactly.
xp_assert_close(yitp, y, atol=1e-4)
def test_inconsistent_x_dimensions_error(self):
# ValueError should be raised if the observation points and evaluation
# points have a different number of dimensions.
y = Halton(2, scramble=False, seed=np.random.RandomState()).random(10)
d = _2d_test_function(y)
x = Halton(1, scramble=False, seed=np.random.RandomState()).random(10)
match = 'Expected the second axis of `x`'
with pytest.raises(ValueError, match=match):
self.build(y, d)(x)
def test_inconsistent_d_length_error(self):
y = np.linspace(0, 1, 5)[:, None]
d = np.zeros(1)
match = 'Expected the first axis of `d`'
with pytest.raises(ValueError, match=match):
self.build(y, d)
def test_y_not_2d_error(self):
y = np.linspace(0, 1, 5)
d = np.zeros(5)
match = '`y` must be a 2-dimensional array.'
with pytest.raises(ValueError, match=match):
self.build(y, d)
def test_inconsistent_smoothing_length_error(self):
y = np.linspace(0, 1, 5)[:, None]
d = np.zeros(5)
smoothing = np.ones(1)
match = 'Expected `smoothing` to be'
with pytest.raises(ValueError, match=match):
self.build(y, d, smoothing=smoothing)
def test_invalid_kernel_name_error(self):
y = np.linspace(0, 1, 5)[:, None]
d = np.zeros(5)
match = '`kernel` must be one of'
with pytest.raises(ValueError, match=match):
self.build(y, d, kernel='test')
def test_epsilon_not_specified_error(self):
y = np.linspace(0, 1, 5)[:, None]
d = np.zeros(5)
for kernel in _AVAILABLE:
if kernel in _SCALE_INVARIANT:
continue
match = '`epsilon` must be specified'
with pytest.raises(ValueError, match=match):
self.build(y, d, kernel=kernel)
def test_x_not_2d_error(self):
y = np.linspace(0, 1, 5)[:, None]
x = np.linspace(0, 1, 5)
d = np.zeros(5)
match = '`x` must be a 2-dimensional array.'
with pytest.raises(ValueError, match=match):
self.build(y, d)(x)
def test_not_enough_observations_error(self):
y = np.linspace(0, 1, 1)[:, None]
d = np.zeros(1)
match = 'At least 2 data points are required'
with pytest.raises(ValueError, match=match):
self.build(y, d, kernel='thin_plate_spline')
@pytest.mark.thread_unsafe
def test_degree_warning(self):
y = np.linspace(0, 1, 5)[:, None]
d = np.zeros(5)
for kernel, deg in _NAME_TO_MIN_DEGREE.items():
# Only test kernels whose minimum degree is not 0.
if deg >= 1:
match = f'`degree` should not be below {deg}'
with pytest.warns(Warning, match=match):
self.build(y, d, epsilon=1.0, kernel=kernel, degree=deg-1)
def test_minus_one_degree(self):
# Make sure a degree of -1 is accepted without any warning.
y = np.linspace(0, 1, 5)[:, None]
d = np.zeros(5)
for kernel in _NAME_TO_MIN_DEGREE:
self.build(y, d, epsilon=1.0, kernel=kernel, degree=-1)
def test_rank_error(self):
# An error should be raised when `kernel` is "thin_plate_spline" and
# observations are 2-D and collinear.
y = np.array([[2.0, 0.0], [1.0, 0.0], [0.0, 0.0]])
d = np.array([0.0, 0.0, 0.0])
match = 'does not have full column rank'
with pytest.raises(LinAlgError, match=match):
self.build(y, d, kernel='thin_plate_spline')(y)
def test_single_point(self):
# Make sure interpolation still works with only one point (in 1, 2, and
# 3 dimensions).
for dim in [1, 2, 3]:
y = np.zeros((1, dim))
d = np.ones((1,))
f = self.build(y, d, kernel='linear')(y)
xp_assert_close(d, f)
def test_pickleable(self):
# Make sure we can pickle and unpickle the interpolant without any
# changes in the behavior.
seq = Halton(1, scramble=False, seed=np.random.RandomState(2305982309))
x = 3*seq.random(50)
xitp = 3*seq.random(50)
y = _1d_test_function(x)
interp = self.build(x, y)
yitp1 = interp(xitp)
yitp2 = pickle.loads(pickle.dumps(interp))(xitp)
xp_assert_close(yitp1, yitp2, atol=1e-16)
class TestRBFInterpolatorNeighborsNone(_TestRBFInterpolator):
def build(self, *args, **kwargs):
return RBFInterpolator(*args, **kwargs)
def test_smoothing_limit_1d(self):
# For large smoothing parameters, the interpolant should approach a
# least squares fit of a polynomial with the specified degree.
seq = Halton(1, scramble=False, seed=np.random.RandomState())
degree = 3
smoothing = 1e8
x = 3*seq.random(50)
xitp = 3*seq.random(50)
y = _1d_test_function(x)
yitp1 = self.build(
x, y,
degree=degree,
smoothing=smoothing
)(xitp)
P = _vandermonde(x, degree)
Pitp = _vandermonde(xitp, degree)
yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])
xp_assert_close(yitp1, yitp2, atol=1e-8)
def test_smoothing_limit_2d(self):
# For large smoothing parameters, the interpolant should approach a
# least squares fit of a polynomial with the specified degree.
seq = Halton(2, scramble=False, seed=np.random.RandomState())
degree = 3
smoothing = 1e8
x = seq.random(100)
xitp = seq.random(100)
y = _2d_test_function(x)
yitp1 = self.build(
x, y,
degree=degree,
smoothing=smoothing
)(xitp)
P = _vandermonde(x, degree)
Pitp = _vandermonde(xitp, degree)
yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])
xp_assert_close(yitp1, yitp2, atol=1e-8)
class TestRBFInterpolatorNeighbors20(_TestRBFInterpolator):
# RBFInterpolator using 20 nearest neighbors.
def build(self, *args, **kwargs):
return RBFInterpolator(*args, **kwargs, neighbors=20)
def test_equivalent_to_rbf_interpolator(self):
seq = Halton(2, scramble=False, seed=np.random.RandomState())
x = seq.random(100)
xitp = seq.random(100)
y = _2d_test_function(x)
yitp1 = self.build(x, y)(xitp)
yitp2 = []
tree = cKDTree(x)
for xi in xitp:
_, nbr = tree.query(xi, 20)
yitp2.append(RBFInterpolator(x[nbr], y[nbr])(xi[None])[0])
xp_assert_close(yitp1, yitp2, atol=1e-8)
def test_concurrency(self):
# Check that no segfaults appear with concurrent access to
# RBFInterpolator
seq = Halton(2, scramble=False, seed=np.random.RandomState(0))
x = seq.random(100)
xitp = seq.random(100)
y = _2d_test_function(x)
interp = self.build(x, y)
def worker_fn(_, interp, xp):
interp(xp)
_run_concurrent_barrier(10, worker_fn, interp, xitp)
class TestRBFInterpolatorNeighborsInf(TestRBFInterpolatorNeighborsNone):
# RBFInterpolator using neighbors=np.inf. This should give exactly the same
# results as neighbors=None, but it will be slower.
def build(self, *args, **kwargs):
return RBFInterpolator(*args, **kwargs, neighbors=np.inf)
def test_equivalent_to_rbf_interpolator(self):
seq = Halton(1, scramble=False, seed=np.random.RandomState())
x = 3*seq.random(50)
xitp = 3*seq.random(50)
y = _1d_test_function(x)
yitp1 = self.build(x, y)(xitp)
yitp2 = RBFInterpolator(x, y)(xitp)
xp_assert_close(yitp1, yitp2, atol=1e-8)

File diff suppressed because it is too large