Commit 3a7a3849ae (parent b4b4398bb0): follow-up, livre
12242 changed files with 2564461 additions and 6914 deletions
17 binary files changed (contents not shown).
@@ -0,0 +1,368 @@
# Copyright (c) 2017, The Chancellor, Masters and Scholars of the University
# of Oxford, and the Chebfun Developers. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the University of Oxford nor the names of its
#       contributors may be used to endorse or promote products derived from
#       this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from math import factorial

import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_array_less
import pytest
import scipy
from scipy.interpolate import AAA, FloaterHormannInterpolator, BarycentricInterpolator

TOL = 1e4 * np.finfo(np.float64).eps
UNIT_INTERVAL = np.linspace(-1, 1, num=1000)
PTS = np.logspace(-15, 0, base=10, num=500)
PTS = np.concatenate([-PTS[::-1], [0], PTS])


@pytest.mark.parametrize("method", [AAA, FloaterHormannInterpolator])
@pytest.mark.parametrize("dtype", [np.float32, np.float64, np.complex64, np.complex128])
def test_dtype_preservation(method, dtype):
    rtol = np.finfo(dtype).eps ** 0.75 * 100
    if method is FloaterHormannInterpolator:
        rtol *= 100
    rng = np.random.default_rng(59846294526092468)

    z = np.linspace(-1, 1, dtype=dtype)
    r = method(z, np.sin(z))

    z2 = rng.uniform(-1, 1, size=100).astype(dtype)
    assert_allclose(r(z2), np.sin(z2), rtol=rtol)
    assert r(z2).dtype == dtype

    if method is AAA:
        assert r.support_points.dtype == dtype
        assert r.support_values.dtype == dtype
        assert r.errors.dtype == z.real.dtype
    assert r.weights.dtype == dtype
    assert r.poles().dtype == np.result_type(dtype, 1j)
    assert r.residues().dtype == np.result_type(dtype, 1j)
    assert r.roots().dtype == np.result_type(dtype, 1j)


@pytest.mark.parametrize("method", [AAA, FloaterHormannInterpolator])
@pytest.mark.parametrize("dtype", [np.int16, np.int32, np.int64])
def test_integer_promotion(method, dtype):
    z = np.arange(10, dtype=dtype)
    r = method(z, z)
    assert r.weights.dtype == np.result_type(dtype, 1.0)
    if method is AAA:
        assert r.support_points.dtype == np.result_type(dtype, 1.0)
        assert r.support_values.dtype == np.result_type(dtype, 1.0)
        assert r.errors.dtype == np.result_type(dtype, 1.0)
    assert r.poles().dtype == np.result_type(dtype, 1j)
    assert r.residues().dtype == np.result_type(dtype, 1j)
    assert r.roots().dtype == np.result_type(dtype, 1j)

    assert r(z).dtype == np.result_type(dtype, 1.0)


class TestAAA:
    def test_input_validation(self):
        with pytest.raises(ValueError, match="same size"):
            AAA([0], [1, 1])
        with pytest.raises(ValueError, match="1-D"):
            AAA([[0], [0]], [[1], [1]])
        with pytest.raises(ValueError, match="finite"):
            AAA([np.inf], [1])
        with pytest.raises(TypeError):
            AAA([1], [1], max_terms=1.0)
        with pytest.raises(ValueError, match="greater"):
            AAA([1], [1], max_terms=-1)

    @pytest.mark.thread_unsafe
    def test_convergence_error(self):
        with pytest.warns(RuntimeWarning, match="AAA failed"):
            AAA(UNIT_INTERVAL, np.exp(UNIT_INTERVAL), max_terms=1)

    # The following tests are based on:
    # https://github.com/chebfun/chebfun/blob/master/tests/chebfun/test_aaa.m
    def test_exp(self):
        f = np.exp(UNIT_INTERVAL)
        r = AAA(UNIT_INTERVAL, f)

        assert_allclose(r(UNIT_INTERVAL), f, atol=TOL)
        assert_equal(r(np.nan), np.nan)
        assert np.isfinite(r(np.inf))

        m1 = r.support_points.size
        r = AAA(UNIT_INTERVAL, f, rtol=1e-3)
        assert r.support_points.size < m1

    def test_tan(self):
        f = np.tan(np.pi * UNIT_INTERVAL)
        r = AAA(UNIT_INTERVAL, f)

        assert_allclose(r(UNIT_INTERVAL), f, atol=10 * TOL, rtol=1.4e-7)
        assert_allclose(np.min(np.abs(r.roots())), 0, atol=3e-10)
        assert_allclose(np.min(np.abs(r.poles() - 0.5)), 0, atol=TOL)
        # Test for spurious poles (poles with tiny residue are likely spurious)
        assert np.min(np.abs(r.residues())) > 1e-13

    def test_short_cases(self):
        # Computed using Chebfun:
        # >> format long
        # >> [r, pol, res, zer, zj, fj, wj, errvec] = aaa([1 2], [0 1])
        z = np.array([0, 1])
        f = np.array([1, 2])
        r = AAA(z, f, rtol=1e-13)
        assert_allclose(r(z), f, atol=TOL)
        assert_allclose(r.poles(), 0.5)
        assert_allclose(r.residues(), 0.25)
        assert_allclose(r.roots(), 1/3)
        assert_equal(r.support_points, z)
        assert_equal(r.support_values, f)
        assert_allclose(r.weights, [0.707106781186547, 0.707106781186547])
        assert_equal(r.errors, [1, 0])

        # >> format long
        # >> [r, pol, res, zer, zj, fj, wj, errvec] = aaa([1 0 0], [0 1 2])
        z = np.array([0, 1, 2])
        f = np.array([1, 0, 0])
        r = AAA(z, f, rtol=1e-13)
        assert_allclose(r(z), f, atol=TOL)
        assert_allclose(np.sort(r.poles()),
                        np.sort([1.577350269189626, 0.422649730810374]))
        assert_allclose(np.sort(r.residues()),
                        np.sort([-0.070441621801729, -0.262891711531604]))
        assert_allclose(np.sort(r.roots()), np.sort([2, 1]))
        assert_equal(r.support_points, z)
        assert_equal(r.support_values, f)
        assert_allclose(r.weights, [0.577350269189626, 0.577350269189626,
                                    0.577350269189626])
        assert_equal(r.errors, [1, 1, 0])

    def test_scale_invariance(self):
        z = np.linspace(0.3, 1.5)
        f = np.exp(z) / (1 + 1j)
        r1 = AAA(z, f)
        r2 = AAA(z, (2**311 * f).astype(np.complex128))
        r3 = AAA(z, (2**-311 * f).astype(np.complex128))
        assert_equal(r1(0.2j), 2**-311 * r2(0.2j))
        assert_equal(r1(1.4), 2**311 * r3(1.4))

    def test_log_func(self):
        rng = np.random.default_rng(1749382759832758297)
        z = rng.standard_normal(10000) + 3j * rng.standard_normal(10000)

        def f(z):
            return np.log(5 - z) / (1 + z**2)

        r = AAA(z, f(z))
        assert_allclose(r(0), f(0), atol=TOL)

    def test_infinite_data(self):
        z = np.linspace(-1, 1)
        r = AAA(z, scipy.special.gamma(z))
        assert_allclose(r(0.63), scipy.special.gamma(0.63), atol=1e-15)

    def test_nan(self):
        x = np.linspace(0, 20)
        with np.errstate(invalid="ignore"):
            f = np.sin(x) / x
        r = AAA(x, f)
        assert_allclose(r(2), np.sin(2) / 2, atol=1e-15)

    def test_residues(self):
        x = np.linspace(-1.337, 2, num=537)
        r = AAA(x, np.exp(x) / x)
        ii = np.flatnonzero(np.abs(r.poles()) < 1e-8)
        assert_allclose(r.residues()[ii], 1, atol=1e-15)

        r = AAA(x, (1 + 1j) * scipy.special.gamma(x))
        ii = np.flatnonzero(abs(r.poles() - (-1)) < 1e-8)
        assert_allclose(r.residues()[ii], -1 - 1j, atol=1e-15)

    # The following tests are based on:
    # https://github.com/complexvariables/RationalFunctionApproximation.jl/blob/main/test/interval.jl
    @pytest.mark.parametrize("func,atol,rtol",
                             [(lambda x: np.abs(x + 0.5 + 0.01j), 5e-13, 1e-7),
                              (lambda x: np.sin(1/(1.05 - x)), 2e-13, 1e-7),
                              (lambda x: np.exp(-1/(x**2)), 3.5e-12, 0),
                              (lambda x: np.exp(-100*x**2), 2e-12, 0),
                              (lambda x: np.exp(-10/(1.2 - x)), 1e-14, 0),
                              (lambda x: 1/(1+np.exp(100*(x + 0.5))), 2e-13, 1e-7),
                              (lambda x: np.abs(x - 0.95), 1e-6, 1e-7)])
    def test_basic_functions(self, func, atol, rtol):
        with np.errstate(divide="ignore"):
            f = func(PTS)
        assert_allclose(AAA(UNIT_INTERVAL, func(UNIT_INTERVAL))(PTS),
                        f, atol=atol, rtol=rtol)

    def test_poles_zeros_residues(self):
        def f(z):
            return (z+1) * (z+2) / ((z+3) * (z+4))
        r = AAA(UNIT_INTERVAL, f(UNIT_INTERVAL))
        assert_allclose(np.sum(r.poles() + r.roots()), -10, atol=1e-12)

        def f(z):
            return 2/(3 + z) + 5/(z - 2j)
        r = AAA(UNIT_INTERVAL, f(UNIT_INTERVAL))
        assert_allclose(r.residues().prod(), 10, atol=1e-8)

        r = AAA(UNIT_INTERVAL, np.sin(10*np.pi*UNIT_INTERVAL))
        assert_allclose(np.sort(np.abs(r.roots()))[18], 0.9, atol=1e-12)

        def f(z):
            return (z - (3 + 3j))/(z + 2)
        r = AAA(UNIT_INTERVAL, f(UNIT_INTERVAL))
        assert_allclose(r.poles()[0]*r.roots()[0], -6-6j, atol=1e-12)

    @pytest.mark.parametrize("func",
                             [lambda z: np.zeros_like(z), lambda z: z, lambda z: 1j*z,
                              lambda z: z**2 + z, lambda z: z**3 + z,
                              lambda z: 1/(1.1 + z), lambda z: 1/(1 + 1j*z),
                              lambda z: 1/(3 + z + z**2), lambda z: 1/(1.01 + z**3)])
    def test_polynomials_and_reciprocals(self, func):
        assert_allclose(AAA(UNIT_INTERVAL, func(UNIT_INTERVAL))(PTS),
                        func(PTS), atol=2e-13)

    # The following tests are taken from:
    # https://github.com/macd/BaryRational.jl/blob/main/test/test_aaa.jl
    def test_spiral(self):
        z = np.exp(np.linspace(-0.5, 0.5 + 15j*np.pi, num=1000))
        r = AAA(z, np.tan(np.pi*z/2))
        assert_allclose(np.sort(np.abs(r.poles()))[:4], [1, 1, 3, 3], rtol=9e-7)

    @pytest.mark.thread_unsafe
    def test_spiral_cleanup(self):
        z = np.exp(np.linspace(-0.5, 0.5 + 15j*np.pi, num=1000))
        # here we set `rtol=0` to force Froissart doublets; without cleanup there
        # are many spurious poles
        with pytest.warns(RuntimeWarning):
            r = AAA(z, np.tan(np.pi*z/2), rtol=0, max_terms=60, clean_up=False)
        n_spurious = np.sum(np.abs(r.residues()) < 1e-14)
        with pytest.warns(RuntimeWarning):
            assert r.clean_up() >= 1
        # check there are fewer potentially spurious poles than before
        assert np.sum(np.abs(r.residues()) < 1e-14) < n_spurious
        # check accuracy
        assert_allclose(r(z), np.tan(np.pi*z/2), atol=6e-12, rtol=3e-12)


class TestFloaterHormann:
    def runge(self, z):
        return 1/(1 + z**2)

    def scale(self, n, d):
        return (-1)**(np.arange(n) + d) * factorial(d)

    def test_iv(self):
        with pytest.raises(ValueError, match="`x`"):
            FloaterHormannInterpolator([[0]], [0], d=0)
        with pytest.raises(ValueError, match="`y`"):
            FloaterHormannInterpolator([0], 0, d=0)
        with pytest.raises(ValueError, match="dimension"):
            FloaterHormannInterpolator([0], [[1, 1], [1, 1]], d=0)
        with pytest.raises(ValueError, match="finite"):
            FloaterHormannInterpolator([np.inf], [1], d=0)
        with pytest.raises(ValueError, match="`d`"):
            FloaterHormannInterpolator([0], [0], d=-1)
        with pytest.raises(ValueError, match="`d`"):
            FloaterHormannInterpolator([0], [0], d=10)
        with pytest.raises(TypeError):
            FloaterHormannInterpolator([0], [0], d=0.0)

    # reference values from Floater and Hormann 2007, page 8.
    @pytest.mark.parametrize("d,expected", [
        (0, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
        (1, [1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]),
        (2, [1, 3, 4, 4, 4, 4, 4, 4, 4, 3, 1]),
        (3, [1, 4, 7, 8, 8, 8, 8, 8, 7, 4, 1]),
        (4, [1, 5, 11, 15, 16, 16, 16, 15, 11, 5, 1])
    ])
    def test_uniform_grid(self, d, expected):
        # Check against explicit results on a uniform grid
        x = np.arange(11)
        r = FloaterHormannInterpolator(x, 0.0*x, d=d)
        assert_allclose(r.weights.ravel()*self.scale(x.size, d), expected,
                        rtol=1e-15, atol=1e-15)

    @pytest.mark.parametrize("d", range(10))
    def test_runge(self, d):
        x = np.linspace(0, 1, 51)
        rng = np.random.default_rng(802754237598370893)
        xx = rng.uniform(0, 1, size=1000)
        y = self.runge(x)
        h = x[1] - x[0]

        r = FloaterHormannInterpolator(x, y, d=d)

        tol = 10*h**(d+1)
        assert_allclose(r(xx), self.runge(xx), atol=1e-10, rtol=tol)
        # check interpolation property
        assert_equal(r(x), self.runge(x))

    def test_complex(self):
        x = np.linspace(-1, 1)
        z = x + x*1j
        r = FloaterHormannInterpolator(z, np.sin(z), d=12)
        xx = np.linspace(-1, 1, num=1000)
        zz = xx + xx*1j
        assert_allclose(r(zz), np.sin(zz), rtol=1e-12)

    def test_polyinterp(self):
        # check that when d=n-1 FH gives a polynomial interpolant
        x = np.linspace(0, 1, 11)
        xx = np.linspace(0, 1, 1001)
        y = np.sin(x)
        r = FloaterHormannInterpolator(x, y, d=x.size-1)
        p = BarycentricInterpolator(x, y)
        assert_allclose(r(xx), p(xx), rtol=1e-12, atol=1e-12)

    @pytest.mark.parametrize("y_shape", [(2,), (2, 3, 1), (1, 5, 6, 4)])
    @pytest.mark.parametrize("xx_shape", [(100), (10, 10)])
    def test_trailing_dim(self, y_shape, xx_shape):
        x = np.linspace(0, 1)
        y = np.broadcast_to(
            np.expand_dims(np.sin(x), tuple(range(1, len(y_shape) + 1))),
            x.shape + y_shape
        )

        r = FloaterHormannInterpolator(x, y)

        rng = np.random.default_rng(897138947238097528091759187597)
        xx = rng.random(xx_shape)
        yy = np.broadcast_to(
            np.expand_dims(np.sin(xx), tuple(range(xx.ndim, len(y_shape) + xx.ndim))),
            xx.shape + y_shape
        )
        rr = r(xx)
        assert rr.shape == xx.shape + y_shape
        assert_allclose(rr, yy, rtol=1e-6)

    def test_zeros(self):
        x = np.linspace(0, 10, num=100)
        r = FloaterHormannInterpolator(x, np.sin(np.pi*x))

        err = np.abs(np.subtract.outer(r.roots(), np.arange(11))).min(axis=0)
        assert_array_less(err, 1e-5)

    def test_no_poles(self):
        x = np.linspace(-1, 1)
        r = FloaterHormannInterpolator(x, 1/x**2)
        p = r.poles()
        mask = (p.real >= -1) & (p.real <= 1) & (np.abs(p.imag) < 1.e-12)
        assert np.sum(mask) == 0
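For orientation only (not part of the commit): a minimal sketch of how the AAA interface exercised by the tests above is typically used, assuming a SciPy build that ships scipy.interpolate.AAA.

import numpy as np
from scipy.interpolate import AAA  # assumes AAA is available in this SciPy build

# Build a rational approximant of tan(pi*x/2) from samples on [-1, 1].
x = np.linspace(-1, 1, num=1000)
r = AAA(x, np.tan(np.pi * x / 2))

r(0.3)                           # evaluate the approximant at a new point
np.sort(np.abs(r.poles()))[:2]   # nearest poles, expected close to +-1
r.residues(), r.roots()          # residues at the poles and zeros of the approximant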
(Diff for one file suppressed because it is too large.)

@@ -0,0 +1,519 @@
import itertools
import os

import numpy as np
from scipy._lib._array_api import (
    xp_assert_equal, xp_assert_close, assert_almost_equal, assert_array_almost_equal
)
from pytest import raises as assert_raises
import pytest
from scipy._lib._testutils import check_free_memory

from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import make_splrep

from scipy.interpolate._fitpack_py import (splrep, splev, bisplrep, bisplev,
     sproot, splprep, splint, spalde, splder, splantider, insert, dblint)
from scipy.interpolate._dfitpack import regrid_smth
from scipy.interpolate._fitpack2 import dfitpack_int


def data_file(basename):
    return os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'data', basename)


def norm2(x):
    return np.sqrt(np.dot(x.T, x))


def f1(x, d=0):
    """Derivatives of sin->cos->-sin->-cos."""
    if d % 4 == 0:
        return np.sin(x)
    if d % 4 == 1:
        return np.cos(x)
    if d % 4 == 2:
        return -np.sin(x)
    if d % 4 == 3:
        return -np.cos(x)


def makepairs(x, y):
    """Helper function to create an array of pairs of x and y."""
    xy = np.array(list(itertools.product(np.asarray(x), np.asarray(y))))
    return xy.T


class TestSmokeTests:
    """
    Smoke tests (with a few asserts) for fitpack routines -- mostly
    check that they are runnable
    """
    def check_1(self, per=0, s=0, a=0, b=2*np.pi, at_nodes=False,
                xb=None, xe=None):
        if xb is None:
            xb = a
        if xe is None:
            xe = b

        N = 20
        # nodes and middle points of the nodes
        x = np.linspace(a, b, N + 1)
        x1 = a + (b - a) * np.arange(1, N, dtype=float) / float(N - 1)
        v = f1(x)

        def err_est(k, d):
            # Assume f has all derivatives < 1
            h = 1.0 / N
            tol = 5 * h**(.75*(k-d))
            if s > 0:
                tol += 1e5*s
            return tol

        for k in range(1, 6):
            tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
            tt = tck[0][k:-k] if at_nodes else x1

            for d in range(k+1):
                tol = err_est(k, d)
                err = norm2(f1(tt, d) - splev(tt, tck, d)) / norm2(f1(tt, d))
                assert err < tol

            # smoke test make_splrep
            if not per:
                spl = make_splrep(x, v, k=k, s=s, xb=xb, xe=xe)
                if len(spl.t) == len(tck[0]):
                    xp_assert_close(spl.t, tck[0], atol=1e-15)
                    xp_assert_close(spl.c, tck[1][:spl.c.size], atol=1e-13)
                else:
                    assert k == 5  # knot lengths differ in some k=5 cases

    def check_2(self, per=0, N=20, ia=0, ib=2*np.pi):
        a, b, dx = 0, 2*np.pi, 0.2*np.pi
        x = np.linspace(a, b, N+1)    # nodes
        v = np.sin(x)

        def err_est(k, d):
            # Assume f has all derivatives < 1
            h = 1.0 / N
            tol = 5 * h**(.75*(k-d))
            return tol

        nk = []
        for k in range(1, 6):
            tck = splrep(x, v, s=0, per=per, k=k, xe=b)
            nk.append([splint(ia, ib, tck), spalde(dx, tck)])

        k = 1
        for r in nk:
            d = 0
            for dr in r[1]:
                tol = err_est(k, d)
                xp_assert_close(dr, f1(dx, d), atol=0, rtol=tol)
                d = d+1
            k = k+1

    def test_smoke_splrep_splev(self):
        self.check_1(s=1e-6)
        self.check_1(b=1.5*np.pi)
        self.check_1(b=1.5*np.pi, xe=2*np.pi, per=1, s=1e-1)

    @pytest.mark.parametrize('per', [0, 1])
    @pytest.mark.parametrize('at_nodes', [True, False])
    def test_smoke_splrep_splev_2(self, per, at_nodes):
        self.check_1(per=per, at_nodes=at_nodes)

    @pytest.mark.parametrize('N', [20, 50])
    @pytest.mark.parametrize('per', [0, 1])
    def test_smoke_splint_spalde(self, N, per):
        self.check_2(per=per, N=N)

    @pytest.mark.parametrize('N', [20, 50])
    @pytest.mark.parametrize('per', [0, 1])
    def test_smoke_splint_spalde_iaib(self, N, per):
        self.check_2(ia=0.2*np.pi, ib=np.pi, N=N, per=per)

    def test_smoke_sproot(self):
        # sproot is only implemented for k=3
        a, b = 0.1, 15
        x = np.linspace(a, b, 20)
        v = np.sin(x)

        for k in [1, 2, 4, 5]:
            tck = splrep(x, v, s=0, per=0, k=k, xe=b)
            with assert_raises(ValueError):
                sproot(tck)

        k = 3
        tck = splrep(x, v, s=0, k=3)
        roots = sproot(tck)
        xp_assert_close(splev(roots, tck), np.zeros(len(roots)), atol=1e-10, rtol=1e-10)
        xp_assert_close(roots, np.pi * np.array([1, 2, 3, 4]), rtol=1e-3)

    @pytest.mark.parametrize('N', [20, 50])
    @pytest.mark.parametrize('k', [1, 2, 3, 4, 5])
    def test_smoke_splprep_splrep_splev(self, N, k):
        a, b, dx = 0, 2.*np.pi, 0.2*np.pi
        x = np.linspace(a, b, N+1)    # nodes
        v = np.sin(x)

        tckp, u = splprep([x, v], s=0, per=0, k=k, nest=-1)
        uv = splev(dx, tckp)
        err1 = abs(uv[1] - np.sin(uv[0]))
        assert err1 < 1e-2

        tck = splrep(x, v, s=0, per=0, k=k)
        err2 = abs(splev(uv[0], tck) - np.sin(uv[0]))
        assert err2 < 1e-2

        # Derivatives of parametric cubic spline at u (first function)
        if k == 3:
            tckp, u = splprep([x, v], s=0, per=0, k=k, nest=-1)
            for d in range(1, k+1):
                uv = splev(dx, tckp, d)

    def test_smoke_bisplrep_bisplev(self):
        xb, xe = 0, 2.*np.pi
        yb, ye = 0, 2.*np.pi
        kx, ky = 3, 3
        Nx, Ny = 20, 20

        def f2(x, y):
            return np.sin(x+y)

        x = np.linspace(xb, xe, Nx + 1)
        y = np.linspace(yb, ye, Ny + 1)
        xy = makepairs(x, y)
        tck = bisplrep(xy[0], xy[1], f2(xy[0], xy[1]), s=0, kx=kx, ky=ky)

        tt = [tck[0][kx:-kx], tck[1][ky:-ky]]
        t2 = makepairs(tt[0], tt[1])
        v1 = bisplev(tt[0], tt[1], tck)
        v2 = f2(t2[0], t2[1])
        v2.shape = len(tt[0]), len(tt[1])

        assert norm2(np.ravel(v1 - v2)) < 1e-2


class TestSplev:
    def test_1d_shape(self):
        x = [1,2,3,4,5]
        y = [4,5,6,7,8]
        tck = splrep(x, y)
        z = splev([1], tck)
        assert z.shape == (1,)
        z = splev(1, tck)
        assert z.shape == ()

    def test_2d_shape(self):
        x = [1, 2, 3, 4, 5]
        y = [4, 5, 6, 7, 8]
        tck = splrep(x, y)
        t = np.array([[1.0, 1.5, 2.0, 2.5],
                      [3.0, 3.5, 4.0, 4.5]])
        z = splev(t, tck)
        z0 = splev(t[0], tck)
        z1 = splev(t[1], tck)
        xp_assert_equal(z, np.vstack((z0, z1)))

    def test_extrapolation_modes(self):
        # test extrapolation modes
        # * if ext=0, return the extrapolated value.
        # * if ext=1, return 0
        # * if ext=2, raise a ValueError
        # * if ext=3, return the boundary value.
        x = [1,2,3]
        y = [0,2,4]
        tck = splrep(x, y, k=1)

        rstl = [[-2, 6], [0, 0], None, [0, 4]]
        for ext in (0, 1, 3):
            assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext])

        assert_raises(ValueError, splev, [0, 4], tck, ext=2)


class TestSplder:
    def setup_method(self):
        # non-uniform grid, just to be sure
        x = np.linspace(0, 1, 100)**3
        y = np.sin(20 * x)
        self.spl = splrep(x, y)

        # double check that knots are non-uniform
        assert np.ptp(np.diff(self.spl[0])) > 0

    def test_inverse(self):
        # Check that antiderivative + derivative is identity.
        for n in range(5):
            spl2 = splantider(self.spl, n)
            spl3 = splder(spl2, n)
            xp_assert_close(self.spl[0], spl3[0])
            xp_assert_close(self.spl[1], spl3[1])
            assert self.spl[2] == spl3[2]

    def test_splder_vs_splev(self):
        # Check derivative vs. FITPACK

        for n in range(3+1):
            # Also extrapolation!
            xx = np.linspace(-1, 2, 2000)
            if n == 3:
                # ... except that FITPACK extrapolates strangely for
                # order 0, so let's not check that.
                xx = xx[(xx >= 0) & (xx <= 1)]

            dy = splev(xx, self.spl, n)
            spl2 = splder(self.spl, n)
            dy2 = splev(xx, spl2)
            if n == 1:
                xp_assert_close(dy, dy2, rtol=2e-6)
            else:
                xp_assert_close(dy, dy2)

    def test_splantider_vs_splint(self):
        # Check antiderivative vs. FITPACK
        spl2 = splantider(self.spl)

        # no extrapolation, splint assumes function is zero outside
        # range
        xx = np.linspace(0, 1, 20)

        for x1 in xx:
            for x2 in xx:
                y1 = splint(x1, x2, self.spl)
                y2 = splev(x2, spl2) - splev(x1, spl2)
                xp_assert_close(np.asarray(y1), np.asarray(y2))

    def test_order0_diff(self):
        assert_raises(ValueError, splder, self.spl, 4)

    def test_kink(self):
        # Should refuse to differentiate splines with kinks

        spl2 = insert(0.5, self.spl, m=2)
        splder(spl2, 2)  # Should work
        assert_raises(ValueError, splder, spl2, 3)

        spl2 = insert(0.5, self.spl, m=3)
        splder(spl2, 1)  # Should work
        assert_raises(ValueError, splder, spl2, 2)

        spl2 = insert(0.5, self.spl, m=4)
        assert_raises(ValueError, splder, spl2, 1)

    def test_multidim(self):
        # c can have trailing dims
        for n in range(3):
            t, c, k = self.spl
            c2 = np.c_[c, c, c]
            c2 = np.dstack((c2, c2))

            spl2 = splantider((t, c2, k), n)
            spl3 = splder(spl2, n)

            xp_assert_close(t, spl3[0])
            xp_assert_close(c2, spl3[1])
            assert k == spl3[2]


class TestSplint:
    def test_len_c(self):
        n, k = 7, 3
        x = np.arange(n)
        y = x**3
        t, c, k = splrep(x, y, s=0)

        # note that len(c) == len(t) == 11 (== len(x) + 2*(k-1))
        assert len(t) == len(c) == n + 2*(k-1)

        # integrate directly: $\int_0^6 x^3 dx = 6^4 / 4$
        res = splint(0, 6, (t, c, k))
        expected = 6**4 / 4
        assert abs(res - expected) < 1e-13

        # check that the coefficients past len(t) - k - 1 are ignored
        c0 = c.copy()
        c0[len(t) - k - 1:] = np.nan
        res0 = splint(0, 6, (t, c0, k))
        assert abs(res0 - expected) < 1e-13

        # however, all other coefficients *are* used
        c0[6] = np.nan
        assert np.isnan(splint(0, 6, (t, c0, k)))

        # check that the coefficient array can have length `len(t) - k - 1`
        c1 = c[:len(t) - k - 1]
        res1 = splint(0, 6, (t, c1, k))
        assert (res1 - expected) < 1e-13

        # however, shorter c arrays raise. The error from f2py is a
        # `dfitpack.error`, which is an Exception but not a ValueError etc.
        with assert_raises(Exception, match=r">=n-k-1"):
            splint(0, 1, (np.ones(10), np.ones(5), 3))


class TestBisplrep:
    def test_overflow(self):
        from numpy.lib.stride_tricks import as_strided
        if dfitpack_int.itemsize == 8:
            size = 1500000**2
        else:
            size = 400**2
        # Don't allocate a real array, as it's very big, but rely
        # on the fact that it's not referenced
        x = as_strided(np.zeros(()), shape=(size,))
        assert_raises(OverflowError, bisplrep, x, x, x, w=x,
                      xb=0, xe=1, yb=0, ye=1, s=0)

    def test_regression_1310(self):
        # Regression test for gh-1310
        with np.load(data_file('bug-1310.npz')) as loaded_data:
            data = loaded_data['data']

        # Shouldn't crash -- the input data triggers work array sizes
        # that previously caused some data to not be aligned on
        # sizeof(double) boundaries in memory, which made the Fortran
        # code crash when compiled with -O3
        bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0,
                 full_output=True)

    @pytest.mark.skipif(dfitpack_int != np.int64, reason="needs ilp64 fitpack")
    def test_ilp64_bisplrep(self):
        check_free_memory(28000)  # VM size, doesn't actually use the pages
        x = np.linspace(0, 1, 400)
        y = np.linspace(0, 1, 400)
        x, y = np.meshgrid(x, y)
        z = np.zeros_like(x)
        tck = bisplrep(x, y, z, kx=3, ky=3, s=0)
        xp_assert_close(bisplev(0.5, 0.5, tck), 0.0)


def test_dblint():
    # Basic test to see it runs and gives the correct result on a trivial
    # problem. Note that `dblint` is not exposed in the interpolate namespace.
    x = np.linspace(0, 1)
    y = np.linspace(0, 1)
    xx, yy = np.meshgrid(x, y)
    rect = RectBivariateSpline(x, y, 4 * xx * yy)
    tck = list(rect.tck)
    tck.extend(rect.degrees)

    assert abs(dblint(0, 1, 0, 1, tck) - 1) < 1e-10
    assert abs(dblint(0, 0.5, 0, 1, tck) - 0.25) < 1e-10
    assert abs(dblint(0.5, 1, 0, 1, tck) - 0.75) < 1e-10
    assert abs(dblint(-100, 100, -100, 100, tck) - 1) < 1e-10


def test_splev_der_k():
    # regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
    # for x outside of the knot range

    # test case from gh-2188
    tck = (np.array([0., 0., 2.5, 2.5]),
           np.array([-1.56679978, 2.43995873, 0., 0.]),
           1)
    t, c, k = tck
    x = np.array([-3, 0, 2.5, 3])

    # an explicit form of the linear spline
    xp_assert_close(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2])
    xp_assert_close(splev(x, tck, 1),
                    np.ones_like(x) * (c[1] - c[0]) / t[2]
                    )

    # now check a random spline vs splder
    np.random.seed(1234)
    x = np.sort(np.random.random(30))
    y = np.random.random(30)
    t, c, k = splrep(x, y)

    x = [t[0] - 1., t[-1] + 1.]
    tck2 = splder((t, c, k), k)
    xp_assert_close(splev(x, (t, c, k), k), splev(x, tck2))


def test_splprep_segfault():
    # regression test for gh-3847: splprep segfaults if knots are specified
    # for task=-1
    t = np.arange(0, 1.1, 0.1)
    x = np.sin(2*np.pi*t)
    y = np.cos(2*np.pi*t)
    tck, u = splprep([x, y], s=0)
    np.arange(0, 1.01, 0.01)

    uknots = tck[0]  # using the knots from the previous fitting
    tck, u = splprep([x, y], task=-1, t=uknots)  # here is the crash


def test_bisplev_integer_overflow():
    np.random.seed(1)

    x = np.linspace(0, 1, 11)
    y = x
    z = np.random.randn(11, 11).ravel()
    kx = 1
    ky = 1

    nx, tx, ny, ty, c, fp, ier = regrid_smth(
        x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0)
    tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky)

    xp = np.zeros([2621440])
    yp = np.zeros([2621440])

    assert_raises((RuntimeError, MemoryError), bisplev, xp, yp, tck)


@pytest.mark.xslow
def test_gh_1766():
    # this should fail gracefully instead of segfaulting (int overflow)
    size = 22
    kx, ky = 3, 3

    def f2(x, y):
        return np.sin(x+y)

    x = np.linspace(0, 10, size)
    y = np.linspace(50, 700, size)
    xy = makepairs(x, y)
    tck = bisplrep(xy[0], xy[1], f2(xy[0], xy[1]), s=0, kx=kx, ky=ky)
    # the size value here can either segfault
    # or produce a MemoryError on main
    tx_ty_size = 500000
    tck[0] = np.arange(tx_ty_size)
    tck[1] = np.arange(tx_ty_size) * 4
    tt_0 = np.arange(50)
    tt_1 = np.arange(50) * 3
    with pytest.raises(MemoryError):
        bisplev(tt_0, tt_1, tck, 1, 1)


def test_spalde_scalar_input():
    # Ticket #629
    x = np.linspace(0, 10)
    y = x**3
    tck = splrep(x, y, k=3, t=[5])
    res = spalde(np.float64(1), tck)
    des = np.array([1., 3., 6., 6.])
    assert_almost_equal(res, des)


def test_spalde_nc():
    # regression test for https://github.com/scipy/scipy/issues/19002
    # here len(t) = 29 and len(c) = 25 (== len(t) - k - 1)
    x = np.asarray([-10., -9., -8., -7., -6., -5., -4., -3., -2.5, -2., -1.5,
                    -1., -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 4., 5., 6.],
                   dtype="float")
    t = [-10.0, -10.0, -10.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0,
         -2.5, -2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0,
         5.0, 6.0, 6.0, 6.0, 6.0]
    c = np.asarray([1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
                    0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
    k = 3

    res = spalde(x, (t, c, k))
    res = np.vstack(res)
    res_splev = np.asarray([splev(x, (t, c, k), nu) for nu in range(4)])
    xp_assert_close(res, res_splev.T, atol=1e-15)
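As a reminder of the FITPACK workflow that the smoke tests above exercise, a minimal sketch (not part of the commit) using the public splrep/splev/splint interface:

import numpy as np
from scipy.interpolate import splrep, splev, splint

x = np.linspace(0, 2 * np.pi, 21)
tck = splrep(x, np.sin(x), s=0)          # interpolating cubic spline as a (t, c, k) tuple
y = splev(0.3 * np.pi, tck)              # evaluate the spline
dy = splev(0.3 * np.pi, tck, der=1)      # ... and its first derivative
area = splint(0, np.pi, tck)             # integral over [0, pi], close to 2 for sin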
(Diff for one file suppressed because it is too large.)

@@ -0,0 +1,64 @@
import itertools
import threading
import time

import numpy as np
import pytest
import scipy.interpolate


class TestGIL:
    """Check if the GIL is properly released by scipy.interpolate functions."""

    def setup_method(self):
        self.messages = []

    def log(self, message):
        self.messages.append(message)

    def make_worker_thread(self, target, args):
        log = self.log

        class WorkerThread(threading.Thread):
            def run(self):
                log('interpolation started')
                target(*args)
                log('interpolation complete')

        return WorkerThread()

    @pytest.mark.xslow
    @pytest.mark.xfail(reason='race conditions, may depend on system load')
    def test_rectbivariatespline(self):
        def generate_params(n_points):
            x = y = np.linspace(0, 1000, n_points)
            x_grid, y_grid = np.meshgrid(x, y)
            z = x_grid * y_grid
            return x, y, z

        def calibrate_delay(requested_time):
            for n_points in itertools.count(5000, 1000):
                args = generate_params(n_points)
                time_started = time.time()
                interpolate(*args)
                if time.time() - time_started > requested_time:
                    return args

        def interpolate(x, y, z):
            scipy.interpolate.RectBivariateSpline(x, y, z)

        args = calibrate_delay(requested_time=3)
        worker_thread = self.make_worker_thread(interpolate, args)
        worker_thread.start()
        for i in range(3):
            time.sleep(0.5)
            self.log('working')
        worker_thread.join()
        assert self.messages == [
            'interpolation started',
            'working',
            'working',
            'working',
            'interpolation complete',
        ]
@@ -0,0 +1,452 @@
import os
import sys

import numpy as np
from numpy.testing import suppress_warnings
from pytest import raises as assert_raises
import pytest
from scipy._lib._array_api import xp_assert_close, assert_almost_equal

from scipy._lib._testutils import check_free_memory
import scipy.interpolate._interpnd as interpnd
import scipy.spatial._qhull as qhull

import pickle
import threading

_IS_32BIT = (sys.maxsize < 2**32)


def data_file(basename):
    return os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'data', basename)


class TestLinearNDInterpolation:
    def test_smoketest(self):
        # Test at single points
        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
                     dtype=np.float64)
        y = np.arange(x.shape[0], dtype=np.float64)

        yi = interpnd.LinearNDInterpolator(x, y)(x)
        assert_almost_equal(y, yi)

    def test_smoketest_alternate(self):
        # Test at single points, alternate calling convention
        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
                     dtype=np.float64)
        y = np.arange(x.shape[0], dtype=np.float64)

        yi = interpnd.LinearNDInterpolator((x[:,0], x[:,1]), y)(x[:,0], x[:,1])
        assert_almost_equal(y, yi)

    def test_complex_smoketest(self):
        # Test at single points
        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
                     dtype=np.float64)
        y = np.arange(x.shape[0], dtype=np.float64)
        y = y - 3j*y

        yi = interpnd.LinearNDInterpolator(x, y)(x)
        assert_almost_equal(y, yi)

    def test_tri_input(self):
        # Test at single points
        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
                     dtype=np.float64)
        y = np.arange(x.shape[0], dtype=np.float64)
        y = y - 3j*y

        tri = qhull.Delaunay(x)
        interpolator = interpnd.LinearNDInterpolator(tri, y)
        yi = interpolator(x)
        assert_almost_equal(y, yi)
        assert interpolator.tri is tri

    def test_square(self):
        # Test barycentric interpolation on a square against a manual
        # implementation

        points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.float64)
        values = np.array([1., 2., -3., 5.], dtype=np.float64)

        # NB: assume triangles (0, 1, 3) and (1, 2, 3)
        #
        #  1----2
        #  | \  |
        #  |  \ |
        #  0----3

        def ip(x, y):
            t1 = (x + y <= 1)
            t2 = ~t1

            x1 = x[t1]
            y1 = y[t1]

            x2 = x[t2]
            y2 = y[t2]

            z = 0*x

            z[t1] = (values[0]*(1 - x1 - y1)
                     + values[1]*y1
                     + values[3]*x1)

            z[t2] = (values[2]*(x2 + y2 - 1)
                     + values[1]*(1 - x2)
                     + values[3]*(1 - y2))
            return z

        xx, yy = np.broadcast_arrays(np.linspace(0, 1, 14)[:,None],
                                     np.linspace(0, 1, 14)[None,:])
        xx = xx.ravel()
        yy = yy.ravel()

        xi = np.array([xx, yy]).T.copy()
        zi = interpnd.LinearNDInterpolator(points, values)(xi)

        assert_almost_equal(zi, ip(xx, yy))

    def test_smoketest_rescale(self):
        # Test at single points
        x = np.array([(0, 0), (-5, -5), (-5, 5), (5, 5), (2.5, 3)],
                     dtype=np.float64)
        y = np.arange(x.shape[0], dtype=np.float64)

        yi = interpnd.LinearNDInterpolator(x, y, rescale=True)(x)
        assert_almost_equal(y, yi)

    def test_square_rescale(self):
        # Test barycentric interpolation on a rectangle with rescaling
        # against the same implementation without rescaling

        points = np.array([(0,0), (0,100), (10,100), (10,0)], dtype=np.float64)
        values = np.array([1., 2., -3., 5.], dtype=np.float64)

        xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
                                     np.linspace(0, 100, 14)[None,:])
        xx = xx.ravel()
        yy = yy.ravel()
        xi = np.array([xx, yy]).T.copy()
        zi = interpnd.LinearNDInterpolator(points, values)(xi)
        zi_rescaled = interpnd.LinearNDInterpolator(points, values,
                                                    rescale=True)(xi)

        assert_almost_equal(zi, zi_rescaled)

    def test_tripoints_input_rescale(self):
        # Test at single points
        x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
                     dtype=np.float64)
        y = np.arange(x.shape[0], dtype=np.float64)
        y = y - 3j*y

        tri = qhull.Delaunay(x)
        yi = interpnd.LinearNDInterpolator(tri.points, y)(x)
        yi_rescale = interpnd.LinearNDInterpolator(tri.points, y,
                                                   rescale=True)(x)
        assert_almost_equal(yi, yi_rescale)

    def test_tri_input_rescale(self):
        # Test at single points
        x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
                     dtype=np.float64)
        y = np.arange(x.shape[0], dtype=np.float64)
        y = y - 3j*y

        tri = qhull.Delaunay(x)
        match = ("Rescaling is not supported when passing a "
                 "Delaunay triangulation as ``points``.")
        with pytest.raises(ValueError, match=match):
            interpnd.LinearNDInterpolator(tri, y, rescale=True)(x)

    def test_pickle(self):
        # Test at single points
        np.random.seed(1234)
        x = np.random.rand(30, 2)
        y = np.random.rand(30) + 1j*np.random.rand(30)

        ip = interpnd.LinearNDInterpolator(x, y)
        ip2 = pickle.loads(pickle.dumps(ip))

        assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))

    @pytest.mark.slow
    @pytest.mark.thread_unsafe
    @pytest.mark.skipif(_IS_32BIT, reason='it fails on 32-bit')
    def test_threading(self):
        # This test was taken from issue 8856
        # https://github.com/scipy/scipy/issues/8856
        check_free_memory(10000)

        r_ticks = np.arange(0, 4200, 10)
        phi_ticks = np.arange(0, 4200, 10)
        r_grid, phi_grid = np.meshgrid(r_ticks, phi_ticks)

        def do_interp(interpolator, slice_rows, slice_cols):
            grid_x, grid_y = np.mgrid[slice_rows, slice_cols]
            res = interpolator((grid_x, grid_y))
            return res

        points = np.vstack((r_grid.ravel(), phi_grid.ravel())).T
        values = (r_grid * phi_grid).ravel()
        interpolator = interpnd.LinearNDInterpolator(points, values)

        worker_thread_1 = threading.Thread(
            target=do_interp,
            args=(interpolator, slice(0, 2100), slice(0, 2100)))
        worker_thread_2 = threading.Thread(
            target=do_interp,
            args=(interpolator, slice(2100, 4200), slice(0, 2100)))
        worker_thread_3 = threading.Thread(
            target=do_interp,
            args=(interpolator, slice(0, 2100), slice(2100, 4200)))
        worker_thread_4 = threading.Thread(
            target=do_interp,
            args=(interpolator, slice(2100, 4200), slice(2100, 4200)))

        worker_thread_1.start()
        worker_thread_2.start()
        worker_thread_3.start()
        worker_thread_4.start()

        worker_thread_1.join()
        worker_thread_2.join()
        worker_thread_3.join()
        worker_thread_4.join()


class TestEstimateGradients2DGlobal:
    def test_smoketest(self):
        x = np.array([(0, 0), (0, 2),
                      (1, 0), (1, 2), (0.25, 0.75), (0.6, 0.8)], dtype=float)
        tri = qhull.Delaunay(x)

        # Should be exact for linear functions, independent of triangulation

        funcs = [
            (lambda x, y: 0*x + 1, (0, 0)),
            (lambda x, y: 0 + x, (1, 0)),
            (lambda x, y: -2 + y, (0, 1)),
            (lambda x, y: 3 + 3*x + 14.15*y, (3, 14.15))
        ]

        for j, (func, grad) in enumerate(funcs):
            z = func(x[:,0], x[:,1])
            dz = interpnd.estimate_gradients_2d_global(tri, z, tol=1e-6)

            assert dz.shape == (6, 2)
            xp_assert_close(
                dz, np.array(grad)[None, :] + 0*dz, rtol=1e-5, atol=1e-5,
                err_msg=f"item {j}"
            )

    def test_regression_2359(self):
        # Check regression --- for certain point sets, gradient
        # estimation could end up in an infinite loop
        points = np.load(data_file('estimate_gradients_hang.npy'))
        values = np.random.rand(points.shape[0])
        tri = qhull.Delaunay(points)

        # This should not hang
        with suppress_warnings() as sup:
            sup.filter(interpnd.GradientEstimationWarning,
                       "Gradient estimation did not converge")
            interpnd.estimate_gradients_2d_global(tri, values, maxiter=1)


class TestCloughTocher2DInterpolator:

    def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False,
                        rescale=False, **kw):
        rng = np.random.RandomState(1234)
        # np.random.seed(1234)
        if x is None:
            x = np.array([(0, 0), (0, 1),
                          (1, 0), (1, 1), (0.25, 0.75), (0.6, 0.8),
                          (0.5, 0.2)],
                         dtype=float)

        if not alternate:
            ip = interpnd.CloughTocher2DInterpolator(x, func(x[:,0], x[:,1]),
                                                     tol=1e-6, rescale=rescale)
        else:
            ip = interpnd.CloughTocher2DInterpolator((x[:,0], x[:,1]),
                                                     func(x[:,0], x[:,1]),
                                                     tol=1e-6, rescale=rescale)

        p = rng.rand(50, 2)

        if not alternate:
            a = ip(p)
        else:
            a = ip(p[:,0], p[:,1])
        b = func(p[:,0], p[:,1])

        try:
            xp_assert_close(a, b, **kw)
        except AssertionError:
            print("_check_accuracy: abs(a-b):", abs(a - b))
            print("ip.grad:", ip.grad)
            raise

    def test_linear_smoketest(self):
        # Should be exact for linear functions, independent of triangulation
        funcs = [
            lambda x, y: 0*x + 1,
            lambda x, y: 0 + x,
            lambda x, y: -2 + y,
            lambda x, y: 3 + 3*x + 14.15*y,
        ]

        for j, func in enumerate(funcs):
            self._check_accuracy(
                func, tol=1e-13, atol=1e-7, rtol=1e-7, err_msg=f"Function {j}"
            )
            self._check_accuracy(
                func, tol=1e-13, atol=1e-7, rtol=1e-7, alternate=True,
                err_msg=f"Function (alternate) {j}"
            )
            # check rescaling
            self._check_accuracy(
                func, tol=1e-13, atol=1e-7, rtol=1e-7,
                err_msg=f"Function (rescaled) {j}", rescale=True
            )
            self._check_accuracy(
                func, tol=1e-13, atol=1e-7, rtol=1e-7, alternate=True, rescale=True,
                err_msg=f"Function (alternate, rescaled) {j}"
            )

    def test_quadratic_smoketest(self):
        # Should be reasonably accurate for quadratic functions
        funcs = [
            lambda x, y: x**2,
            lambda x, y: y**2,
            lambda x, y: x**2 - y**2,
            lambda x, y: x*y,
        ]

        for j, func in enumerate(funcs):
            self._check_accuracy(
                func, tol=1e-9, atol=0.22, rtol=0, err_msg=f"Function {j}"
            )
            self._check_accuracy(
                func, tol=1e-9, atol=0.22, rtol=0, err_msg=f"Function {j}", rescale=True
            )

    def test_tri_input(self):
        # Test at single points
        x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
                     dtype=np.float64)
        y = np.arange(x.shape[0], dtype=np.float64)
        y = y - 3j*y

        tri = qhull.Delaunay(x)
        yi = interpnd.CloughTocher2DInterpolator(tri, y)(x)
        assert_almost_equal(y, yi)

    def test_tri_input_rescale(self):
        # Test at single points
        x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
                     dtype=np.float64)
        y = np.arange(x.shape[0], dtype=np.float64)
        y = y - 3j*y

        tri = qhull.Delaunay(x)
        match = ("Rescaling is not supported when passing a "
                 "Delaunay triangulation as ``points``.")
        with pytest.raises(ValueError, match=match):
            interpnd.CloughTocher2DInterpolator(tri, y, rescale=True)(x)

    def test_tripoints_input_rescale(self):
        # Test at single points
        x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
                     dtype=np.float64)
        y = np.arange(x.shape[0], dtype=np.float64)
        y = y - 3j*y

        tri = qhull.Delaunay(x)
        yi = interpnd.CloughTocher2DInterpolator(tri.points, y)(x)
        yi_rescale = interpnd.CloughTocher2DInterpolator(tri.points, y, rescale=True)(x)
        assert_almost_equal(yi, yi_rescale)

    @pytest.mark.fail_slow(5)
    def test_dense(self):
        # Should be more accurate for dense meshes
        funcs = [
            lambda x, y: x**2,
            lambda x, y: y**2,
            lambda x, y: x**2 - y**2,
            lambda x, y: x*y,
            lambda x, y: np.cos(2*np.pi*x)*np.sin(2*np.pi*y)
        ]

        rng = np.random.RandomState(4321)  # use a different seed than the check!
        grid = np.r_[np.array([(0,0), (0,1), (1,0), (1,1)], dtype=float),
                     rng.rand(30*30, 2)]

        for j, func in enumerate(funcs):
            self._check_accuracy(
                func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2, err_msg=f"Function {j}"
            )
            self._check_accuracy(
                func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
                err_msg=f"Function {j}", rescale=True
            )

    def test_wrong_ndim(self):
        x = np.random.randn(30, 3)
        y = np.random.randn(30)
        assert_raises(ValueError, interpnd.CloughTocher2DInterpolator, x, y)

    def test_pickle(self):
        # Test at single points
        rng = np.random.RandomState(1234)
        x = rng.rand(30, 2)
        y = rng.rand(30) + 1j*rng.rand(30)

        ip = interpnd.CloughTocher2DInterpolator(x, y)
        ip2 = pickle.loads(pickle.dumps(ip))

        assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))

    def test_boundary_tri_symmetry(self):
        # Interpolation at neighbourless triangles should retain
        # symmetry with mirroring the triangle.

        # Equilateral triangle
        points = np.array([(0, 0), (1, 0), (0.5, np.sqrt(3)/2)])
        values = np.array([1, 0, 0])

        ip = interpnd.CloughTocher2DInterpolator(points, values)

        # Set gradient to zero at vertices
        ip.grad[...] = 0

        # Interpolation should be symmetric vs. the bisector
        alpha = 0.3
        p1 = np.array([0.5 * np.cos(alpha), 0.5 * np.sin(alpha)])
        p2 = np.array([0.5 * np.cos(np.pi/3 - alpha), 0.5 * np.sin(np.pi/3 - alpha)])

        v1 = ip(p1)
        v2 = ip(p2)
        xp_assert_close(v1, v2)

        # ... and affine invariant
        rng = np.random.RandomState(1)
        A = rng.randn(2, 2)
        b = rng.randn(2)

        points = A.dot(points.T).T + b[None,:]
        p1 = A.dot(p1) + b
        p2 = A.dot(p2) + b

        ip = interpnd.CloughTocher2DInterpolator(points, values)
        ip.grad[...] = 0

        w1 = ip(p1)
        w2 = ip(p2)
        xp_assert_close(w1, v1)
        xp_assert_close(w2, v2)
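The interpnd classes exercised above are also exported publicly from scipy.interpolate; a minimal usage sketch (not part of the commit):

import numpy as np
from scipy.interpolate import LinearNDInterpolator, CloughTocher2DInterpolator

rng = np.random.default_rng(0)
pts = rng.random((100, 2))                     # scattered 2-D sample points
vals = np.hypot(pts[:, 0], pts[:, 1])

lin = LinearNDInterpolator(pts, vals)          # piecewise linear on a Delaunay triangulation
cub = CloughTocher2DInterpolator(pts, vals)    # piecewise cubic, C1-smooth Clough-Tocher
lin(0.5, 0.5), cub(0.5, 0.5)                   # evaluate both at a query point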
(Diff for one file suppressed because it is too large.)

@@ -0,0 +1,308 @@
import numpy as np
|
||||
from scipy._lib._array_api import (
|
||||
xp_assert_equal, xp_assert_close
|
||||
)
|
||||
import pytest
|
||||
from pytest import raises as assert_raises
|
||||
|
||||
from scipy.interpolate import (griddata, NearestNDInterpolator,
|
||||
LinearNDInterpolator,
|
||||
CloughTocher2DInterpolator)
|
||||
from scipy._lib._testutils import _run_concurrent_barrier
|
||||
|
||||
|
||||
parametrize_interpolators = pytest.mark.parametrize(
|
||||
"interpolator", [NearestNDInterpolator, LinearNDInterpolator,
|
||||
CloughTocher2DInterpolator]
|
||||
)
|
||||
parametrize_methods = pytest.mark.parametrize(
|
||||
'method',
|
||||
('nearest', 'linear', 'cubic'),
|
||||
)
|
||||
parametrize_rescale = pytest.mark.parametrize(
|
||||
'rescale',
|
||||
(True, False),
|
||||
)
|
||||
|
||||
|
||||
class TestGriddata:
|
||||
def test_fill_value(self):
|
||||
x = [(0,0), (0,1), (1,0)]
|
||||
y = [1, 2, 3]
|
||||
|
||||
yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1)
|
||||
xp_assert_equal(yi, [-1., -1, 1])
|
||||
|
||||
yi = griddata(x, y, [(1,1), (1,2), (0,0)])
|
||||
xp_assert_equal(yi, [np.nan, np.nan, 1])
|
||||
|
||||
@parametrize_methods
|
||||
@parametrize_rescale
|
||||
def test_alternative_call(self, method, rescale):
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = (np.arange(x.shape[0], dtype=np.float64)[:,None]
|
||||
+ np.array([0,1])[None,:])
|
||||
|
||||
msg = repr((method, rescale))
|
||||
yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method,
|
||||
rescale=rescale)
|
||||
xp_assert_close(y, yi, atol=1e-14, err_msg=msg)
|
||||
|
||||
@parametrize_methods
|
||||
@parametrize_rescale
|
||||
def test_multivalue_2d(self, method, rescale):
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = (np.arange(x.shape[0], dtype=np.float64)[:,None]
|
||||
+ np.array([0,1])[None,:])
|
||||
|
||||
msg = repr((method, rescale))
|
||||
yi = griddata(x, y, x, method=method, rescale=rescale)
|
||||
xp_assert_close(y, yi, atol=1e-14, err_msg=msg)
|
||||
|
||||
@parametrize_methods
|
||||
@parametrize_rescale
|
||||
def test_multipoint_2d(self, method, rescale):
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
|
||||
xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
|
||||
|
||||
msg = repr((method, rescale))
|
||||
yi = griddata(x, y, xi, method=method, rescale=rescale)
|
||||
|
||||
assert yi.shape == (5, 3), msg
|
||||
xp_assert_close(yi, np.tile(y[:,None], (1, 3)),
|
||||
atol=1e-14, err_msg=msg)
|
||||
|
||||
@parametrize_methods
|
||||
@parametrize_rescale
|
||||
def test_complex_2d(self, method, rescale):
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
y = y - 2j*y[::-1]
|
||||
|
||||
xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
|
||||
|
||||
msg = repr((method, rescale))
|
||||
yi = griddata(x, y, xi, method=method, rescale=rescale)
|
||||
|
||||
assert yi.shape == (5, 3)
|
||||
xp_assert_close(yi, np.tile(y[:,None], (1, 3)),
|
||||
atol=1e-14, err_msg=msg)
|
||||
|
||||
@parametrize_methods
|
||||
def test_1d(self, method):
|
||||
x = np.array([1, 2.5, 3, 4.5, 5, 6])
|
||||
y = np.array([1, 2, 0, 3.9, 2, 1])
|
||||
|
||||
xp_assert_close(griddata(x, y, x, method=method), y,
|
||||
err_msg=method, atol=1e-14)
|
||||
xp_assert_close(griddata(x.reshape(6, 1), y, x, method=method), y,
|
||||
err_msg=method, atol=1e-14)
|
||||
xp_assert_close(griddata((x,), y, (x,), method=method), y,
|
||||
err_msg=method, atol=1e-14)
|
||||
|
||||
def test_1d_borders(self):
|
||||
# Test for nearest neighbor case with xi outside
|
||||
# the range of the values.
|
||||
x = np.array([1, 2.5, 3, 4.5, 5, 6])
|
||||
y = np.array([1, 2, 0, 3.9, 2, 1])
|
||||
xi = np.array([0.9, 6.5])
|
||||
yi_should = np.array([1.0, 1.0])
|
||||
|
||||
method = 'nearest'
|
||||
xp_assert_close(griddata(x, y, xi,
|
||||
method=method), yi_should,
|
||||
err_msg=method,
|
||||
atol=1e-14)
|
||||
xp_assert_close(griddata(x.reshape(6, 1), y, xi,
|
||||
method=method), yi_should,
|
||||
err_msg=method,
|
||||
atol=1e-14)
|
||||
xp_assert_close(griddata((x, ), y, (xi, ),
|
||||
method=method), yi_should,
|
||||
err_msg=method,
|
||||
atol=1e-14)
|
||||
|
||||
@parametrize_methods
|
||||
def test_1d_unsorted(self, method):
|
||||
x = np.array([2.5, 1, 4.5, 5, 6, 3])
|
||||
y = np.array([1, 2, 0, 3.9, 2, 1])
|
||||
|
||||
xp_assert_close(griddata(x, y, x, method=method), y,
|
||||
err_msg=method, atol=1e-10)
|
||||
xp_assert_close(griddata(x.reshape(6, 1), y, x, method=method), y,
|
||||
err_msg=method, atol=1e-10)
|
||||
xp_assert_close(griddata((x,), y, (x,), method=method), y,
|
||||
err_msg=method, atol=1e-10)
|
||||
|
||||
@parametrize_methods
|
||||
def test_square_rescale_manual(self, method):
|
||||
points = np.array([(0,0), (0,100), (10,100), (10,0), (1, 5)], dtype=np.float64)
|
||||
points_rescaled = np.array([(0,0), (0,1), (1,1), (1,0), (0.1, 0.05)],
|
||||
dtype=np.float64)
|
||||
values = np.array([1., 2., -3., 5., 9.], dtype=np.float64)
|
||||
|
||||
xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
|
||||
np.linspace(0, 100, 14)[None,:])
|
||||
xx = xx.ravel()
|
||||
yy = yy.ravel()
|
||||
xi = np.array([xx, yy]).T.copy()
|
||||
|
||||
msg = method
|
||||
zi = griddata(points_rescaled, values, xi/np.array([10, 100.]),
|
||||
method=method)
|
||||
zi_rescaled = griddata(points, values, xi, method=method,
|
||||
rescale=True)
|
||||
xp_assert_close(zi, zi_rescaled, err_msg=msg,
|
||||
atol=1e-12)
|
||||
|
||||
@parametrize_methods
|
||||
def test_xi_1d(self, method):
|
||||
# Check that 1-D xi is interpreted as a coordinate
|
||||
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
|
||||
dtype=np.float64)
|
||||
y = np.arange(x.shape[0], dtype=np.float64)
|
||||
y = y - 2j*y[::-1]
|
||||
|
||||
xi = np.array([0.5, 0.5])
|
||||
|
||||
p1 = griddata(x, y, xi, method=method)
|
||||
p2 = griddata(x, y, xi[None,:], method=method)
|
||||
xp_assert_close(p1, p2, err_msg=method)
|
||||
|
||||
xi1 = np.array([0.5])
|
||||
xi3 = np.array([0.5, 0.5, 0.5])
|
||||
assert_raises(ValueError, griddata, x, y, xi1,
|
||||
method=method)
|
||||
assert_raises(ValueError, griddata, x, y, xi3,
|
||||
method=method)
|
||||
|
||||
|
||||
class TestNearestNDInterpolator:
|
||||
def test_nearest_options(self):
|
||||
# smoke test that NearestNDInterpolator accept cKDTree options
|
||||
npts, nd = 4, 3
|
||||
x = np.arange(npts*nd).reshape((npts, nd))
|
||||
y = np.arange(npts)
|
||||
nndi = NearestNDInterpolator(x, y)
|
||||
|
||||
opts = {'balanced_tree': False, 'compact_nodes': False}
|
||||
nndi_o = NearestNDInterpolator(x, y, tree_options=opts)
|
||||
xp_assert_close(nndi(x), nndi_o(x), atol=1e-14)
|
||||
|
||||
def test_nearest_list_argument(self):
|
||||
nd = np.array([[0, 0, 0, 0, 1, 0, 1],
|
||||
[0, 0, 0, 0, 0, 1, 1],
|
||||
[0, 0, 0, 0, 1, 1, 2]])
|
||||
d = nd[:, 3:]
|
||||
|
||||
# z is np.array
|
||||
NI = NearestNDInterpolator((d[0], d[1]), d[2])
|
||||
xp_assert_equal(NI([0.1, 0.9], [0.1, 0.9]), [0.0, 2.0])
|
||||
|
||||
# z is list
|
||||
NI = NearestNDInterpolator((d[0], d[1]), list(d[2]))
|
||||
xp_assert_equal(NI([0.1, 0.9], [0.1, 0.9]), [0.0, 2.0])
|
||||
|
||||
def test_nearest_query_options(self):
|
||||
nd = np.array([[0, 0.5, 0, 1],
|
||||
[0, 0, 0.5, 1],
|
||||
[0, 1, 1, 2]])
|
||||
delta = 0.1
|
||||
query_points = [0 + delta, 1 + delta], [0 + delta, 1 + delta]
|
||||
|
||||
# case 1 - query max_dist is smaller than
|
||||
# the query points' nearest distance to nd.
|
||||
NI = NearestNDInterpolator((nd[0], nd[1]), nd[2])
|
||||
distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) - 1e-7
|
||||
xp_assert_equal(NI(query_points, distance_upper_bound=distance_upper_bound),
|
||||
[np.nan, np.nan])
|
||||
|
||||
# case 2 - query p is inf, will return [0, 2]
|
||||
distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) - 1e-7
|
||||
p = np.inf
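# With p = np.inf the query uses the Chebyshev (max-coordinate) metric, so each
# query point is only `delta` away from its nearest node, which is within the
# bound above; the lookup therefore succeeds and returns [0, 2].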
|
||||
xp_assert_equal(
|
||||
NI(query_points, distance_upper_bound=distance_upper_bound, p=p),
|
||||
[0.0, 2.0]
|
||||
)
|
||||
|
||||
# case 3 - query max_dist is larger, so should return non np.nan
|
||||
distance_upper_bound = np.sqrt(delta ** 2 + delta ** 2) + 1e-7
|
||||
xp_assert_equal(
|
||||
NI(query_points, distance_upper_bound=distance_upper_bound),
|
||||
[0.0, 2.0]
|
||||
)
|
||||
|
||||
def test_nearest_query_valid_inputs(self):
|
||||
nd = np.array([[0, 1, 0, 1],
|
||||
[0, 0, 1, 1],
|
||||
[0, 1, 1, 2]])
|
||||
NI = NearestNDInterpolator((nd[0], nd[1]), nd[2])
|
||||
with assert_raises(TypeError):
|
||||
NI([0.5, 0.5], query_options="not a dictionary")
|
||||
|
||||
@pytest.mark.thread_unsafe
|
||||
def test_concurrency(self):
|
||||
npts, nd = 50, 3
|
||||
x = np.arange(npts * nd).reshape((npts, nd))
|
||||
y = np.arange(npts)
|
||||
nndi = NearestNDInterpolator(x, y)
|
||||
|
||||
def worker_fn(_, spl):
|
||||
spl(x)
|
||||
|
||||
_run_concurrent_barrier(10, worker_fn, nndi)
|
||||
|
||||
|
||||
class TestNDInterpolators:
|
||||
@parametrize_interpolators
|
||||
def test_broadcastable_input(self, interpolator):
|
||||
# input data
|
||||
rng = np.random.RandomState(0)
|
||||
x = rng.random(10)
|
||||
y = rng.random(10)
|
||||
z = np.hypot(x, y)
|
||||
|
||||
# x-y grid for interpolation
|
||||
X = np.linspace(min(x), max(x))
|
||||
Y = np.linspace(min(y), max(y))
|
||||
X, Y = np.meshgrid(X, Y)
|
||||
XY = np.vstack((X.ravel(), Y.ravel())).T
|
||||
interp = interpolator(list(zip(x, y)), z)
|
||||
# single array input
|
||||
interp_points0 = interp(XY)
|
||||
# tuple input
|
||||
interp_points1 = interp((X, Y))
|
||||
interp_points2 = interp((X, 0.0))
|
||||
# broadcastable input
|
||||
interp_points3 = interp(X, Y)
|
||||
interp_points4 = interp(X, 0.0)
|
||||
|
||||
assert (interp_points0.size ==
|
||||
interp_points1.size ==
|
||||
interp_points2.size ==
|
||||
interp_points3.size ==
|
||||
interp_points4.size)
|
||||
|
||||
@parametrize_interpolators
|
||||
def test_read_only(self, interpolator):
|
||||
# input data
|
||||
rng = np.random.RandomState(0)
|
||||
xy = rng.random((10, 2))
|
||||
x, y = xy[:, 0], xy[:, 1]
|
||||
z = np.hypot(x, y)
|
||||
|
||||
# interpolation points
|
||||
XY = rng.random((50, 2))
|
||||
|
||||
xy.setflags(write=False)
|
||||
z.setflags(write=False)
|
||||
XY.setflags(write=False)
|
||||
|
||||
interp = interpolator(xy, z)
|
||||
interp(XY)
@@ -0,0 +1,107 @@
import numpy as np
from scipy.interpolate import pade
from scipy._lib._array_api import (
    xp_assert_equal, assert_array_almost_equal
)


def test_pade_trivial():
    nump, denomp = pade([1.0], 0)
    xp_assert_equal(nump.c, np.asarray([1.0]))
    xp_assert_equal(denomp.c, np.asarray([1.0]))

    nump, denomp = pade([1.0], 0, 0)
    xp_assert_equal(nump.c, np.asarray([1.0]))
    xp_assert_equal(denomp.c, np.asarray([1.0]))


def test_pade_4term_exp():
    # First four Taylor coefficients of exp(x).
    # Unlike poly1d, the first array element is the zero-order term.
    an = [1.0, 1.0, 0.5, 1.0/6]

    nump, denomp = pade(an, 0)
    assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
    assert_array_almost_equal(denomp.c, [1.0])

    nump, denomp = pade(an, 1)
    assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
    assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])

    nump, denomp = pade(an, 2)
    assert_array_almost_equal(nump.c, [1.0/3, 1.0])
    assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])

    nump, denomp = pade(an, 3)
    assert_array_almost_equal(nump.c, [1.0])
    assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])

    # Testing inclusion of optional parameter
    nump, denomp = pade(an, 0, 3)
    assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
    assert_array_almost_equal(denomp.c, [1.0])

    nump, denomp = pade(an, 1, 2)
    assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
    assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])

    nump, denomp = pade(an, 2, 1)
    assert_array_almost_equal(nump.c, [1.0/3, 1.0])
    assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])

    nump, denomp = pade(an, 3, 0)
    assert_array_almost_equal(nump.c, [1.0])
    assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])

    # Testing reducing array.
    nump, denomp = pade(an, 0, 2)
    assert_array_almost_equal(nump.c, [0.5, 1.0, 1.0])
    assert_array_almost_equal(denomp.c, [1.0])

    nump, denomp = pade(an, 1, 1)
    assert_array_almost_equal(nump.c, [1.0/2, 1.0])
    assert_array_almost_equal(denomp.c, [-1.0/2, 1.0])

    nump, denomp = pade(an, 2, 0)
    assert_array_almost_equal(nump.c, [1.0])
    assert_array_almost_equal(denomp.c, [1.0/2, -1.0, 1.0])


def test_pade_ints():
    # Simple test sequences (one of ints, one of floats).
    an_int = [1, 2, 3, 4]
    an_flt = [1.0, 2.0, 3.0, 4.0]

    # Make sure integer arrays give the same result as float arrays with same values.
    for i in range(0, len(an_int)):
        for j in range(0, len(an_int) - i):

            # Create float and int pade approximation for given order.
            nump_int, denomp_int = pade(an_int, i, j)
            nump_flt, denomp_flt = pade(an_flt, i, j)

            # Check that they are the same.
            xp_assert_equal(nump_int.c, nump_flt.c)
            xp_assert_equal(denomp_int.c, denomp_flt.c)


def test_pade_complex():
    # Test sequence with known solutions - see page 6 of 10.1109/PESGM.2012.6344759.
    # Variable x is parameter - these tests will work with any complex number.
    x = 0.2 + 0.6j
    an = [1.0, x, -x*x.conjugate(), x.conjugate()*(x**2) + x*(x.conjugate()**2),
          -(x**3)*x.conjugate() - 3*(x*x.conjugate())**2 - x*(x.conjugate()**3)]

    nump, denomp = pade(an, 1, 1)
    assert_array_almost_equal(nump.c, [x + x.conjugate(), 1.0])
    assert_array_almost_equal(denomp.c, [x.conjugate(), 1.0])

    nump, denomp = pade(an, 1, 2)
    assert_array_almost_equal(nump.c, [x**2, 2*x + x.conjugate(), 1.0])
    assert_array_almost_equal(denomp.c, [x + x.conjugate(), 1.0])

    nump, denomp = pade(an, 2, 2)
    assert_array_almost_equal(
        nump.c,
        [x**2 + x*x.conjugate() + x.conjugate()**2, 2*(x + x.conjugate()), 1.0]
    )
    assert_array_almost_equal(denomp.c, [x.conjugate()**2, x + 2*x.conjugate(), 1.0])
@@ -0,0 +1,972 @@
|
|||
import warnings
|
||||
import io
|
||||
import numpy as np
|
||||
|
||||
from scipy._lib._array_api import (
|
||||
xp_assert_equal, xp_assert_close, assert_array_almost_equal, assert_almost_equal
|
||||
)
|
||||
from pytest import raises as assert_raises
|
||||
import pytest
|
||||
|
||||
from scipy.interpolate import (
|
||||
KroghInterpolator, krogh_interpolate,
|
||||
BarycentricInterpolator, barycentric_interpolate,
|
||||
approximate_taylor_polynomial, CubicHermiteSpline, pchip,
|
||||
PchipInterpolator, pchip_interpolate, Akima1DInterpolator, CubicSpline,
|
||||
make_interp_spline)
|
||||
from scipy._lib._testutils import _run_concurrent_barrier
|
||||
|
||||
|
||||
def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0,
|
||||
extra_args=None):
|
||||
if extra_args is None:
|
||||
extra_args = {}
|
||||
rng = np.random.RandomState(1234)
|
||||
|
||||
x = [-1, 0, 1, 2, 3, 4]
|
||||
s = list(range(1, len(y_shape)+1))
|
||||
s.insert(axis % (len(y_shape)+1), 0)
|
||||
y = rng.rand(*((6,) + y_shape)).transpose(s)
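# The transpose order s moves the sampling axis (length 6) from position 0 to
# position `axis`, so the 6 data points lie along `axis` and y_shape is spread
# over the remaining axes.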
|
||||
|
||||
xi = np.zeros(x_shape)
|
||||
if interpolator_cls is CubicHermiteSpline:
|
||||
dydx = rng.rand(*((6,) + y_shape)).transpose(s)
|
||||
yi = interpolator_cls(x, y, dydx, axis=axis, **extra_args)(xi)
|
||||
else:
|
||||
yi = interpolator_cls(x, y, axis=axis, **extra_args)(xi)
|
||||
|
||||
target_shape = ((deriv_shape or ()) + y.shape[:axis]
|
||||
+ x_shape + y.shape[axis:][1:])
|
||||
assert yi.shape == target_shape
|
||||
|
||||
# check it works also with lists
|
||||
if x_shape and y.size > 0:
|
||||
if interpolator_cls is CubicHermiteSpline:
|
||||
interpolator_cls(list(x), list(y), list(dydx), axis=axis,
|
||||
**extra_args)(list(xi))
|
||||
else:
|
||||
interpolator_cls(list(x), list(y), axis=axis,
|
||||
**extra_args)(list(xi))
|
||||
|
||||
# check also values
|
||||
if xi.size > 0 and deriv_shape is None:
|
||||
bs_shape = y.shape[:axis] + (1,)*len(x_shape) + y.shape[axis:][1:]
|
||||
yv = y[((slice(None,),)*(axis % y.ndim)) + (1,)]
|
||||
yv = yv.reshape(bs_shape)
|
||||
|
||||
yi, y = np.broadcast_arrays(yi, yv)
|
||||
xp_assert_close(yi, y)
|
||||
|
||||
|
||||
SHAPES = [(), (0,), (1,), (6, 2, 5)]
|
||||
|
||||
|
||||
def test_shapes():
|
||||
|
||||
def spl_interp(x, y, axis):
|
||||
return make_interp_spline(x, y, axis=axis)
|
||||
|
||||
for ip in [KroghInterpolator, BarycentricInterpolator, CubicHermiteSpline,
|
||||
pchip, Akima1DInterpolator, CubicSpline, spl_interp]:
|
||||
for s1 in SHAPES:
|
||||
for s2 in SHAPES:
|
||||
for axis in range(-len(s2), len(s2)):
|
||||
if ip != CubicSpline:
|
||||
check_shape(ip, s1, s2, None, axis)
|
||||
else:
|
||||
for bc in ['natural', 'clamped']:
|
||||
extra = {'bc_type': bc}
|
||||
check_shape(ip, s1, s2, None, axis, extra)
|
||||
|
||||
def test_derivs_shapes():
|
||||
for ip in [KroghInterpolator, BarycentricInterpolator]:
|
||||
def interpolator_derivs(x, y, axis=0):
|
||||
return ip(x, y, axis).derivatives
|
||||
|
||||
for s1 in SHAPES:
|
||||
for s2 in SHAPES:
|
||||
for axis in range(-len(s2), len(s2)):
|
||||
check_shape(interpolator_derivs, s1, s2, (6,), axis)
|
||||
|
||||
|
||||
def test_deriv_shapes():
|
||||
def krogh_deriv(x, y, axis=0):
|
||||
return KroghInterpolator(x, y, axis).derivative
|
||||
|
||||
def bary_deriv(x, y, axis=0):
|
||||
return BarycentricInterpolator(x, y, axis).derivative
|
||||
|
||||
def pchip_deriv(x, y, axis=0):
|
||||
return pchip(x, y, axis).derivative()
|
||||
|
||||
def pchip_deriv2(x, y, axis=0):
|
||||
return pchip(x, y, axis).derivative(2)
|
||||
|
||||
def pchip_antideriv(x, y, axis=0):
|
||||
return pchip(x, y, axis).antiderivative()
|
||||
|
||||
def pchip_antideriv2(x, y, axis=0):
|
||||
return pchip(x, y, axis).antiderivative(2)
|
||||
|
||||
def pchip_deriv_inplace(x, y, axis=0):
|
||||
class P(PchipInterpolator):
|
||||
def __call__(self, x):
|
||||
return PchipInterpolator.__call__(self, x, 1)
|
||||
pass
|
||||
return P(x, y, axis)
|
||||
|
||||
def akima_deriv(x, y, axis=0):
|
||||
return Akima1DInterpolator(x, y, axis).derivative()
|
||||
|
||||
def akima_antideriv(x, y, axis=0):
|
||||
return Akima1DInterpolator(x, y, axis).antiderivative()
|
||||
|
||||
def cspline_deriv(x, y, axis=0):
|
||||
return CubicSpline(x, y, axis).derivative()
|
||||
|
||||
def cspline_antideriv(x, y, axis=0):
|
||||
return CubicSpline(x, y, axis).antiderivative()
|
||||
|
||||
def bspl_deriv(x, y, axis=0):
|
||||
return make_interp_spline(x, y, axis=axis).derivative()
|
||||
|
||||
def bspl_antideriv(x, y, axis=0):
|
||||
return make_interp_spline(x, y, axis=axis).antiderivative()
|
||||
|
||||
for ip in [krogh_deriv, bary_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace,
|
||||
pchip_antideriv, pchip_antideriv2, akima_deriv, akima_antideriv,
|
||||
cspline_deriv, cspline_antideriv, bspl_deriv, bspl_antideriv]:
|
||||
for s1 in SHAPES:
|
||||
for s2 in SHAPES:
|
||||
for axis in range(-len(s2), len(s2)):
|
||||
check_shape(ip, s1, s2, (), axis)
|
||||
|
||||
|
||||
def test_complex():
|
||||
x = [1, 2, 3, 4]
|
||||
y = [1, 2, 1j, 3]
|
||||
|
||||
for ip in [KroghInterpolator, BarycentricInterpolator, CubicSpline]:
|
||||
p = ip(x, y)
|
||||
xp_assert_close(p(x), np.asarray(y))
|
||||
|
||||
dydx = [0, -1j, 2, 3j]
|
||||
p = CubicHermiteSpline(x, y, dydx)
|
||||
xp_assert_close(p(x), np.asarray(y))
|
||||
xp_assert_close(p(x, 1), np.asarray(dydx))
|
||||
|
||||
|
||||
class TestKrogh:
|
||||
def setup_method(self):
|
||||
self.true_poly = np.polynomial.Polynomial([-4, 5, 1, 3, -2])
|
||||
self.test_xs = np.linspace(-1,1,100)
|
||||
self.xs = np.linspace(-1,1,5)
|
||||
self.ys = self.true_poly(self.xs)
|
||||
|
||||
def test_lagrange(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
|
||||
|
||||
def test_scalar(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
assert_almost_equal(self.true_poly(7), P(7), check_0d=False)
|
||||
assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)), check_0d=False)
|
||||
|
||||
def test_derivatives(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
D = P.derivatives(self.test_xs)
|
||||
for i in range(D.shape[0]):
|
||||
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
|
||||
D[i])
|
||||
|
||||
def test_low_derivatives(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
D = P.derivatives(self.test_xs,len(self.xs)+2)
|
||||
for i in range(D.shape[0]):
|
||||
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
|
||||
D[i])
|
||||
|
||||
def test_derivative(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
m = 10
|
||||
r = P.derivatives(self.test_xs,m)
|
||||
for i in range(m):
|
||||
assert_almost_equal(P.derivative(self.test_xs,i),r[i])
|
||||
|
||||
def test_high_derivative(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
for i in range(len(self.xs), 2*len(self.xs)):
|
||||
assert_almost_equal(P.derivative(self.test_xs,i),
|
||||
np.zeros(len(self.test_xs)))
|
||||
|
||||
def test_ndim_derivatives(self):
|
||||
poly1 = self.true_poly
|
||||
poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
|
||||
poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
|
||||
ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
|
||||
|
||||
P = KroghInterpolator(self.xs, ys, axis=0)
|
||||
D = P.derivatives(self.test_xs)
|
||||
for i in range(D.shape[0]):
|
||||
xp_assert_close(D[i],
|
||||
np.stack((poly1.deriv(i)(self.test_xs),
|
||||
poly2.deriv(i)(self.test_xs),
|
||||
poly3.deriv(i)(self.test_xs)),
|
||||
axis=-1))
|
||||
|
||||
def test_ndim_derivative(self):
|
||||
poly1 = self.true_poly
|
||||
poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
|
||||
poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
|
||||
ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
|
||||
|
||||
P = KroghInterpolator(self.xs, ys, axis=0)
|
||||
for i in range(P.n):
|
||||
xp_assert_close(P.derivative(self.test_xs, i),
|
||||
np.stack((poly1.deriv(i)(self.test_xs),
|
||||
poly2.deriv(i)(self.test_xs),
|
||||
poly3.deriv(i)(self.test_xs)),
|
||||
axis=-1))
|
||||
|
||||
def test_hermite(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
|
||||
|
||||
def test_vector(self):
|
||||
xs = [0, 1, 2]
|
||||
ys = np.array([[0,1],[1,0],[2,1]])
|
||||
P = KroghInterpolator(xs,ys)
|
||||
Pi = [KroghInterpolator(xs,ys[:,i]) for i in range(ys.shape[1])]
|
||||
test_xs = np.linspace(-1,3,100)
|
||||
assert_almost_equal(P(test_xs),
|
||||
np.asarray([p(test_xs) for p in Pi]).T)
|
||||
assert_almost_equal(P.derivatives(test_xs),
|
||||
np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
|
||||
(1,2,0)))
|
||||
|
||||
def test_empty(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
xp_assert_equal(P([]), np.asarray([]))
|
||||
|
||||
def test_shapes_scalarvalue(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
assert np.shape(P(0)) == ()
|
||||
assert np.shape(P(np.array(0))) == ()
|
||||
assert np.shape(P([0])) == (1,)
|
||||
assert np.shape(P([0,1])) == (2,)
|
||||
|
||||
def test_shapes_scalarvalue_derivative(self):
|
||||
P = KroghInterpolator(self.xs,self.ys)
|
||||
n = P.n
|
||||
assert np.shape(P.derivatives(0)) == (n,)
|
||||
assert np.shape(P.derivatives(np.array(0))) == (n,)
|
||||
assert np.shape(P.derivatives([0])) == (n, 1)
|
||||
assert np.shape(P.derivatives([0, 1])) == (n, 2)
|
||||
|
||||
def test_shapes_vectorvalue(self):
|
||||
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
|
||||
assert np.shape(P(0)) == (3,)
|
||||
assert np.shape(P([0])) == (1, 3)
|
||||
assert np.shape(P([0, 1])) == (2, 3)
|
||||
|
||||
def test_shapes_1d_vectorvalue(self):
|
||||
P = KroghInterpolator(self.xs,np.outer(self.ys,[1]))
|
||||
assert np.shape(P(0)) == (1,)
|
||||
assert np.shape(P([0])) == (1, 1)
|
||||
assert np.shape(P([0,1])) == (2, 1)
|
||||
|
||||
def test_shapes_vectorvalue_derivative(self):
|
||||
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
|
||||
n = P.n
|
||||
assert np.shape(P.derivatives(0)) == (n, 3)
|
||||
assert np.shape(P.derivatives([0])) == (n, 1, 3)
|
||||
assert np.shape(P.derivatives([0,1])) == (n, 2, 3)
|
||||
|
||||
def test_wrapper(self):
|
||||
P = KroghInterpolator(self.xs, self.ys)
|
||||
ki = krogh_interpolate
|
||||
assert_almost_equal(P(self.test_xs), ki(self.xs, self.ys, self.test_xs))
|
||||
assert_almost_equal(P.derivative(self.test_xs, 2),
|
||||
ki(self.xs, self.ys, self.test_xs, der=2))
|
||||
assert_almost_equal(P.derivatives(self.test_xs, 2),
|
||||
ki(self.xs, self.ys, self.test_xs, der=[0, 1]))
|
||||
|
||||
def test_int_inputs(self):
|
||||
# Check input args are cast correctly to floats, gh-3669
|
||||
x = [0, 234, 468, 702, 936, 1170, 1404, 2340, 3744, 6084, 8424,
|
||||
13104, 60000]
|
||||
offset_cdf = np.array([-0.95, -0.86114777, -0.8147762, -0.64072425,
|
||||
-0.48002351, -0.34925329, -0.26503107,
|
||||
-0.13148093, -0.12988833, -0.12979296,
|
||||
-0.12973574, -0.08582937, 0.05])
|
||||
f = KroghInterpolator(x, offset_cdf)
|
||||
|
||||
xp_assert_close(abs((f(x) - offset_cdf) / f.derivative(x, 1)),
|
||||
np.zeros_like(offset_cdf), atol=1e-10)
|
||||
|
||||
def test_derivatives_complex(self):
|
||||
# regression test for gh-7381: krogh.derivatives(0) fails for complex y
|
||||
x, y = np.array([-1, -1, 0, 1, 1]), np.array([1, 1.0j, 0, -1, 1.0j])
|
||||
func = KroghInterpolator(x, y)
|
||||
cmplx = func.derivatives(0)
|
||||
|
||||
cmplx2 = (KroghInterpolator(x, y.real).derivatives(0) +
|
||||
1j*KroghInterpolator(x, y.imag).derivatives(0))
|
||||
xp_assert_close(cmplx, cmplx2, atol=1e-15)
|
||||
|
||||
@pytest.mark.thread_unsafe
|
||||
def test_high_degree_warning(self):
|
||||
with pytest.warns(UserWarning, match="40 degrees provided,"):
|
||||
KroghInterpolator(np.arange(40), np.ones(40))
|
||||
|
||||
@pytest.mark.thread_unsafe
|
||||
def test_concurrency(self):
|
||||
P = KroghInterpolator(self.xs, self.ys)
|
||||
|
||||
def worker_fn(_, interp):
|
||||
interp(self.xs)
|
||||
|
||||
_run_concurrent_barrier(10, worker_fn, P)
|
||||
|
||||
|
||||
class TestTaylor:
|
||||
def test_exponential(self):
|
||||
degree = 5
|
||||
p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
|
||||
for i in range(degree+1):
|
||||
assert_almost_equal(p(0),1)
|
||||
p = p.deriv()
|
||||
assert_almost_equal(p(0),0)
|
||||
|
||||
|
||||
class TestBarycentric:
|
||||
def setup_method(self):
|
||||
self.true_poly = np.polynomial.Polynomial([-4, 5, 1, 3, -2])
|
||||
self.test_xs = np.linspace(-1, 1, 100)
|
||||
self.xs = np.linspace(-1, 1, 5)
|
||||
self.ys = self.true_poly(self.xs)
|
||||
|
||||
def test_lagrange(self):
|
||||
# Ensure backwards compatibility post SPEC 7 (random_state is still accepted)
|
||||
P = BarycentricInterpolator(self.xs, self.ys, random_state=1)
|
||||
xp_assert_close(P(self.test_xs), self.true_poly(self.test_xs))
|
||||
|
||||
def test_scalar(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys, rng=1)
|
||||
xp_assert_close(P(7), self.true_poly(7), check_0d=False)
|
||||
xp_assert_close(P(np.array(7)), self.true_poly(np.array(7)), check_0d=False)
|
||||
|
||||
def test_derivatives(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
D = P.derivatives(self.test_xs)
|
||||
for i in range(D.shape[0]):
|
||||
xp_assert_close(self.true_poly.deriv(i)(self.test_xs), D[i])
|
||||
|
||||
def test_low_derivatives(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
D = P.derivatives(self.test_xs, len(self.xs)+2)
|
||||
for i in range(D.shape[0]):
|
||||
xp_assert_close(self.true_poly.deriv(i)(self.test_xs),
|
||||
D[i],
|
||||
atol=1e-12)
|
||||
|
||||
def test_derivative(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
m = 10
|
||||
r = P.derivatives(self.test_xs, m)
|
||||
for i in range(m):
|
||||
xp_assert_close(P.derivative(self.test_xs, i), r[i])
|
||||
|
||||
def test_high_derivative(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
for i in range(len(self.xs), 5*len(self.xs)):
|
||||
xp_assert_close(P.derivative(self.test_xs, i),
|
||||
np.zeros(len(self.test_xs)))
|
||||
|
||||
def test_ndim_derivatives(self):
|
||||
poly1 = self.true_poly
|
||||
poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
|
||||
poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
|
||||
ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
|
||||
|
||||
P = BarycentricInterpolator(self.xs, ys, axis=0)
|
||||
D = P.derivatives(self.test_xs)
|
||||
for i in range(D.shape[0]):
|
||||
xp_assert_close(D[i],
|
||||
np.stack((poly1.deriv(i)(self.test_xs),
|
||||
poly2.deriv(i)(self.test_xs),
|
||||
poly3.deriv(i)(self.test_xs)),
|
||||
axis=-1),
|
||||
atol=1e-12)
|
||||
|
||||
def test_ndim_derivative(self):
|
||||
poly1 = self.true_poly
|
||||
poly2 = np.polynomial.Polynomial([-2, 5, 3, -1])
|
||||
poly3 = np.polynomial.Polynomial([12, -3, 4, -5, 6])
|
||||
ys = np.stack((poly1(self.xs), poly2(self.xs), poly3(self.xs)), axis=-1)
|
||||
|
||||
P = BarycentricInterpolator(self.xs, ys, axis=0)
|
||||
for i in range(P.n):
|
||||
xp_assert_close(P.derivative(self.test_xs, i),
|
||||
np.stack((poly1.deriv(i)(self.test_xs),
|
||||
poly2.deriv(i)(self.test_xs),
|
||||
poly3.deriv(i)(self.test_xs)),
|
||||
axis=-1),
|
||||
atol=1e-12)
|
||||
|
||||
def test_delayed(self):
|
||||
P = BarycentricInterpolator(self.xs)
|
||||
P.set_yi(self.ys)
|
||||
assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
|
||||
|
||||
def test_append(self):
|
||||
P = BarycentricInterpolator(self.xs[:3], self.ys[:3])
|
||||
P.add_xi(self.xs[3:], self.ys[3:])
|
||||
assert_almost_equal(self.true_poly(self.test_xs), P(self.test_xs))
|
||||
|
||||
def test_vector(self):
|
||||
xs = [0, 1, 2]
|
||||
ys = np.array([[0, 1], [1, 0], [2, 1]])
|
||||
BI = BarycentricInterpolator
|
||||
P = BI(xs, ys)
|
||||
Pi = [BI(xs, ys[:, i]) for i in range(ys.shape[1])]
|
||||
test_xs = np.linspace(-1, 3, 100)
|
||||
assert_almost_equal(P(test_xs),
|
||||
np.asarray([p(test_xs) for p in Pi]).T)
|
||||
|
||||
def test_shapes_scalarvalue(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
assert np.shape(P(0)) == ()
|
||||
assert np.shape(P(np.array(0))) == ()
|
||||
assert np.shape(P([0])) == (1,)
|
||||
assert np.shape(P([0, 1])) == (2,)
|
||||
|
||||
def test_shapes_scalarvalue_derivative(self):
|
||||
P = BarycentricInterpolator(self.xs,self.ys)
|
||||
n = P.n
|
||||
assert np.shape(P.derivatives(0)) == (n,)
|
||||
assert np.shape(P.derivatives(np.array(0))) == (n,)
|
||||
assert np.shape(P.derivatives([0])) == (n,1)
|
||||
assert np.shape(P.derivatives([0,1])) == (n,2)
|
||||
|
||||
def test_shapes_vectorvalue(self):
|
||||
P = BarycentricInterpolator(self.xs, np.outer(self.ys, np.arange(3)))
|
||||
assert np.shape(P(0)) == (3,)
|
||||
assert np.shape(P([0])) == (1, 3)
|
||||
assert np.shape(P([0, 1])) == (2, 3)
|
||||
|
||||
def test_shapes_1d_vectorvalue(self):
|
||||
P = BarycentricInterpolator(self.xs, np.outer(self.ys, [1]))
|
||||
assert np.shape(P(0)) == (1,)
|
||||
assert np.shape(P([0])) == (1, 1)
|
||||
assert np.shape(P([0, 1])) == (2, 1)
|
||||
|
||||
def test_shapes_vectorvalue_derivative(self):
|
||||
P = BarycentricInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
|
||||
n = P.n
|
||||
assert np.shape(P.derivatives(0)) == (n, 3)
|
||||
assert np.shape(P.derivatives([0])) == (n, 1, 3)
|
||||
assert np.shape(P.derivatives([0, 1])) == (n, 2, 3)
|
||||
|
||||
def test_wrapper(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys, rng=1)
|
||||
bi = barycentric_interpolate
|
||||
xp_assert_close(P(self.test_xs), bi(self.xs, self.ys, self.test_xs, rng=1))
|
||||
xp_assert_close(P.derivative(self.test_xs, 2),
|
||||
bi(self.xs, self.ys, self.test_xs, der=2, rng=1))
|
||||
xp_assert_close(P.derivatives(self.test_xs, 2),
|
||||
bi(self.xs, self.ys, self.test_xs, der=[0, 1], rng=1))
|
||||
|
||||
def test_int_input(self):
|
||||
x = 1000 * np.arange(1, 11) # np.prod(x[-1] - x[:-1]) overflows
|
||||
y = np.arange(1, 11)
|
||||
value = barycentric_interpolate(x, y, 1000 * 9.5)
|
||||
assert_almost_equal(value, np.asarray(9.5))
|
||||
|
||||
def test_large_chebyshev(self):
|
||||
# The barycentric weights for Chebyshev points of the second kind are known
# analytically. Naive calculation of barycentric weights will fail
|
||||
# for large N because of numerical underflow and overflow. We test
|
||||
# correctness for large N against analytical Chebyshev weights.
|
||||
|
||||
# Without capacity scaling or permutation, n=800 fails,
|
||||
# With just capacity scaling, n=1097 fails
|
||||
# With both capacity scaling and random permutation, n=30000 succeeds
|
||||
n = 1100
|
||||
j = np.arange(n + 1).astype(np.float64)
|
||||
x = np.cos(j * np.pi / n)
|
||||
|
||||
# See page 506 of Berrut and Trefethen 2004 for this formula
|
||||
w = (-1) ** j
|
||||
w[0] *= 0.5
|
||||
w[-1] *= 0.5
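# i.e. w_j is proportional to (-1)**j * delta_j with delta_j = 1/2 at the two
# endpoints and 1 elsewhere, the closed form for Chebyshev points of the second kind.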
|
||||
|
||||
P = BarycentricInterpolator(x)
|
||||
|
||||
# It's okay to have a constant scaling factor in the weights because it
|
||||
# cancels out in the evaluation of the polynomial.
|
||||
factor = P.wi[0]
|
||||
assert_almost_equal(P.wi / (2 * factor), w)
|
||||
|
||||
def test_warning(self):
|
||||
# Test that the divide-by-zero warning is properly suppressed when the
# evaluation points coincide with the interpolation points
|
||||
P = BarycentricInterpolator([0, 1], [1, 2])
|
||||
with np.errstate(divide='raise'):
|
||||
yi = P(P.xi)
|
||||
|
||||
# Check if the interpolated values match the input values
|
||||
# at the nodes
|
||||
assert_almost_equal(yi, P.yi.ravel())
|
||||
|
||||
@pytest.mark.thread_unsafe
|
||||
def test_repeated_node(self):
|
||||
# check that a repeated node raises a ValueError
|
||||
# (computing the weights requires division by xi[i] - xi[j])
|
||||
xis = np.array([0.1, 0.5, 0.9, 0.5])
|
||||
ys = np.array([1, 2, 3, 4])
|
||||
with pytest.raises(ValueError,
|
||||
match="Interpolation points xi must be distinct."):
|
||||
BarycentricInterpolator(xis, ys)
|
||||
|
||||
@pytest.mark.thread_unsafe
|
||||
def test_concurrency(self):
|
||||
P = BarycentricInterpolator(self.xs, self.ys)
|
||||
|
||||
def worker_fn(_, interp):
|
||||
interp(self.xs)
|
||||
|
||||
_run_concurrent_barrier(10, worker_fn, P)
|
||||
|
||||
|
||||
class TestPCHIP:
|
||||
def _make_random(self, npts=20):
|
||||
rng = np.random.RandomState(1234)
|
||||
xi = np.sort(rng.random(npts))
|
||||
yi = rng.random(npts)
|
||||
return pchip(xi, yi), xi, yi
|
||||
|
||||
def test_overshoot(self):
|
||||
# PCHIP should not overshoot
|
||||
p, xi, yi = self._make_random()
|
||||
for i in range(len(xi)-1):
|
||||
x1, x2 = xi[i], xi[i+1]
|
||||
y1, y2 = yi[i], yi[i+1]
|
||||
if y1 > y2:
|
||||
y1, y2 = y2, y1
|
||||
xp = np.linspace(x1, x2, 10)
|
||||
yp = p(xp)
|
||||
assert ((y1 <= yp + 1e-15) & (yp <= y2 + 1e-15)).all()
|
||||
|
||||
def test_monotone(self):
|
||||
# PCHIP should preserve monotonicity
|
||||
p, xi, yi = self._make_random()
|
||||
for i in range(len(xi)-1):
|
||||
x1, x2 = xi[i], xi[i+1]
|
||||
y1, y2 = yi[i], yi[i+1]
|
||||
xp = np.linspace(x1, x2, 10)
|
||||
yp = p(xp)
|
||||
assert ((y2-y1) * (yp[1:] - yp[:1]) > 0).all()
|
||||
|
||||
def test_cast(self):
|
||||
# regression test for integer input data, see gh-3453
|
||||
data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100],
|
||||
[-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
|
||||
xx = np.arange(100)
|
||||
curve = pchip(data[0], data[1])(xx)
|
||||
|
||||
data1 = data * 1.0
|
||||
curve1 = pchip(data1[0], data1[1])(xx)
|
||||
|
||||
xp_assert_close(curve, curve1, atol=1e-14, rtol=1e-14)
|
||||
|
||||
def test_nag(self):
|
||||
# Example from NAG C implementation,
|
||||
# http://nag.com/numeric/cl/nagdoc_cl25/html/e01/e01bec.html
|
||||
# suggested in gh-5326 as a smoke test for the way the derivatives
|
||||
# are computed (see also gh-3453)
|
||||
dataStr = '''
|
||||
7.99 0.00000E+0
|
||||
8.09 0.27643E-4
|
||||
8.19 0.43750E-1
|
||||
8.70 0.16918E+0
|
||||
9.20 0.46943E+0
|
||||
10.00 0.94374E+0
|
||||
12.00 0.99864E+0
|
||||
15.00 0.99992E+0
|
||||
20.00 0.99999E+0
|
||||
'''
|
||||
data = np.loadtxt(io.StringIO(dataStr))
|
||||
pch = pchip(data[:,0], data[:,1])
|
||||
|
||||
resultStr = '''
|
||||
7.9900 0.0000
|
||||
9.1910 0.4640
|
||||
10.3920 0.9645
|
||||
11.5930 0.9965
|
||||
12.7940 0.9992
|
||||
13.9950 0.9998
|
||||
15.1960 0.9999
|
||||
16.3970 1.0000
|
||||
17.5980 1.0000
|
||||
18.7990 1.0000
|
||||
20.0000 1.0000
|
||||
'''
|
||||
result = np.loadtxt(io.StringIO(resultStr))
|
||||
xp_assert_close(result[:,1], pch(result[:,0]), rtol=0., atol=5e-5)
|
||||
|
||||
def test_endslopes(self):
|
||||
# this is a smoke test for gh-3453: PCHIP interpolator should not
|
||||
# set edge slopes to zero if the data do not suggest zero edge derivatives
|
||||
x = np.array([0.0, 0.1, 0.25, 0.35])
|
||||
y1 = np.array([279.35, 0.5e3, 1.0e3, 2.5e3])
|
||||
y2 = np.array([279.35, 2.5e3, 1.50e3, 1.0e3])
|
||||
for pp in (pchip(x, y1), pchip(x, y2)):
|
||||
for t in (x[0], x[-1]):
|
||||
assert pp(t, 1) != 0
|
||||
|
||||
@pytest.mark.thread_unsafe
|
||||
def test_all_zeros(self):
|
||||
x = np.arange(10)
|
||||
y = np.zeros_like(x)
|
||||
|
||||
# this should work and not generate any warnings
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings('error')
|
||||
pch = pchip(x, y)
|
||||
|
||||
xx = np.linspace(0, 9, 101)
|
||||
assert all(pch(xx) == 0.)
|
||||
|
||||
def test_two_points(self):
|
||||
# regression test for gh-6222: pchip([0, 1], [0, 1]) fails because
|
||||
# it tries to use a three-point scheme to estimate edge derivatives,
|
||||
# while there are only two points available.
|
||||
# Instead, it should construct a linear interpolator.
|
||||
x = np.linspace(0, 1, 11)
|
||||
p = pchip([0, 1], [0, 2])
|
||||
xp_assert_close(p(x), 2*x, atol=1e-15)
|
||||
|
||||
def test_pchip_interpolate(self):
|
||||
assert_array_almost_equal(
|
||||
pchip_interpolate([1, 2, 3], [4, 5, 6], [0.5], der=1),
|
||||
np.asarray([1.]))
|
||||
|
||||
assert_array_almost_equal(
|
||||
pchip_interpolate([1, 2, 3], [4, 5, 6], [0.5], der=0),
|
||||
np.asarray([3.5]))
|
||||
|
||||
assert_array_almost_equal(
|
||||
np.asarray(pchip_interpolate([1, 2, 3], [4, 5, 6], [0.5], der=[0, 1])),
|
||||
np.asarray([[3.5], [1]]))
|
||||
|
||||
def test_roots(self):
|
||||
# regression test for gh-6357: .roots method should work
|
||||
p = pchip([0, 1], [-1, 1])
|
||||
r = p.roots()
|
||||
xp_assert_close(r, np.asarray([0.5]))
|
||||
|
||||
|
||||
class TestCubicSpline:
|
||||
@staticmethod
|
||||
def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',
|
||||
tol=1e-14):
|
||||
"""Check that spline coefficients satisfy the continuity and boundary
|
||||
conditions."""
|
||||
x = S.x
|
||||
c = S.c
|
||||
dx = np.diff(x)
|
||||
dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))
|
||||
dxi = dx[:-1]
|
||||
|
||||
# Check C2 continuity.
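# With the PPoly convention c[0]*t**3 + c[1]*t**2 + c[2]*t + c[3] (t local to each
# interval), evaluating each piece at t = dx must reproduce the next piece's
# coefficients at t = 0: the three checks below correspond to continuity of the
# value, the first derivative, and (half) the second derivative.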
|
||||
xp_assert_close(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +
|
||||
c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)
|
||||
xp_assert_close(c[2, 1:], 3 * c[0, :-1] * dxi**2 +
|
||||
2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)
|
||||
xp_assert_close(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],
|
||||
rtol=tol, atol=tol)
|
||||
|
||||
# Check that we found a parabola: with 3 points and not-a-knot at both ends the
# spline reduces to a single quadratic, so the third derivative is 0.
|
||||
if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':
|
||||
xp_assert_close(c[0], np.zeros_like(c[0]), rtol=tol, atol=tol)
|
||||
return
|
||||
|
||||
# Check periodic boundary conditions.
|
||||
if bc_start == 'periodic':
|
||||
xp_assert_close(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)
|
||||
xp_assert_close(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)
|
||||
xp_assert_close(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)
|
||||
return
|
||||
|
||||
# Check other boundary conditions.
|
||||
if bc_start == 'not-a-knot':
|
||||
if x.size == 2:
|
||||
slope = (S(x[1]) - S(x[0])) / dx[0]
|
||||
slope = np.asarray(slope)
|
||||
xp_assert_close(S(x[0], 1), slope, rtol=tol, atol=tol)
|
||||
else:
|
||||
xp_assert_close(c[0, 0], c[0, 1], rtol=tol, atol=tol)
|
||||
elif bc_start == 'clamped':
|
||||
xp_assert_close(
|
||||
S(x[0], 1), np.zeros_like(S(x[0], 1)), rtol=tol, atol=tol)
|
||||
elif bc_start == 'natural':
|
||||
xp_assert_close(
|
||||
S(x[0], 2), np.zeros_like(S(x[0], 2)), rtol=tol, atol=tol)
|
||||
else:
|
||||
order, value = bc_start
|
||||
xp_assert_close(S(x[0], order), np.asarray(value), rtol=tol, atol=tol)
|
||||
|
||||
if bc_end == 'not-a-knot':
|
||||
if x.size == 2:
|
||||
slope = (S(x[1]) - S(x[0])) / dx[0]
|
||||
slope = np.asarray(slope)
|
||||
xp_assert_close(S(x[1], 1), slope, rtol=tol, atol=tol)
|
||||
else:
|
||||
xp_assert_close(c[0, -1], c[0, -2], rtol=tol, atol=tol)
|
||||
elif bc_end == 'clamped':
|
||||
xp_assert_close(S(x[-1], 1), np.zeros_like(S(x[-1], 1)),
|
||||
rtol=tol, atol=tol)
|
||||
elif bc_end == 'natural':
|
||||
xp_assert_close(S(x[-1], 2), np.zeros_like(S(x[-1], 2)),
|
||||
rtol=2*tol, atol=2*tol)
|
||||
else:
|
||||
order, value = bc_end
|
||||
xp_assert_close(S(x[-1], order), np.asarray(value), rtol=tol, atol=tol)
|
||||
|
||||
def check_all_bc(self, x, y, axis):
|
||||
deriv_shape = list(y.shape)
|
||||
del deriv_shape[axis]
|
||||
first_deriv = np.empty(deriv_shape)
|
||||
first_deriv.fill(2)
|
||||
second_deriv = np.empty(deriv_shape)
|
||||
second_deriv.fill(-1)
|
||||
bc_all = [
|
||||
'not-a-knot',
|
||||
'natural',
|
||||
'clamped',
|
||||
(1, first_deriv),
|
||||
(2, second_deriv)
|
||||
]
|
||||
for bc in bc_all[:3]:
|
||||
S = CubicSpline(x, y, axis=axis, bc_type=bc)
|
||||
self.check_correctness(S, bc, bc)
|
||||
|
||||
for bc_start in bc_all:
|
||||
for bc_end in bc_all:
|
||||
S = CubicSpline(x, y, axis=axis, bc_type=(bc_start, bc_end))
|
||||
self.check_correctness(S, bc_start, bc_end, tol=2e-14)
|
||||
|
||||
def test_general(self):
|
||||
x = np.array([-1, 0, 0.5, 2, 4, 4.5, 5.5, 9])
|
||||
y = np.array([0, -0.5, 2, 3, 2.5, 1, 1, 0.5])
|
||||
for n in [2, 3, x.size]:
|
||||
self.check_all_bc(x[:n], y[:n], 0)
|
||||
|
||||
Y = np.empty((2, n, 2))
|
||||
Y[0, :, 0] = y[:n]
|
||||
Y[0, :, 1] = y[:n] - 1
|
||||
Y[1, :, 0] = y[:n] + 2
|
||||
Y[1, :, 1] = y[:n] + 3
|
||||
self.check_all_bc(x[:n], Y, 1)
|
||||
|
||||
def test_periodic(self):
|
||||
for n in [2, 3, 5]:
|
||||
x = np.linspace(0, 2 * np.pi, n)
|
||||
y = np.cos(x)
|
||||
S = CubicSpline(x, y, bc_type='periodic')
|
||||
self.check_correctness(S, 'periodic', 'periodic')
|
||||
|
||||
Y = np.empty((2, n, 2))
|
||||
Y[0, :, 0] = y
|
||||
Y[0, :, 1] = y + 2
|
||||
Y[1, :, 0] = y - 1
|
||||
Y[1, :, 1] = y + 5
|
||||
S = CubicSpline(x, Y, axis=1, bc_type='periodic')
|
||||
self.check_correctness(S, 'periodic', 'periodic')
|
||||
|
||||
def test_periodic_eval(self):
|
||||
x = np.linspace(0, 2 * np.pi, 10)
|
||||
y = np.cos(x)
|
||||
S = CubicSpline(x, y, bc_type='periodic')
|
||||
assert_almost_equal(S(1), S(1 + 2 * np.pi), decimal=15)
|
||||
|
||||
def test_second_derivative_continuity_gh_11758(self):
|
||||
# gh-11758: C2 continuity fail
|
||||
x = np.array([0.9, 1.3, 1.9, 2.1, 2.6, 3.0, 3.9, 4.4, 4.7, 5.0, 6.0,
|
||||
7.0, 8.0, 9.2, 10.5, 11.3, 11.6, 12.0, 12.6, 13.0, 13.3])
|
||||
y = np.array([1.3, 1.5, 1.85, 2.1, 2.6, 2.7, 2.4, 2.15, 2.05, 2.1,
|
||||
2.25, 2.3, 2.25, 1.95, 1.4, 0.9, 0.7, 0.6, 0.5, 0.4, 1.3])
|
||||
S = CubicSpline(x, y, bc_type='periodic', extrapolate='periodic')
|
||||
self.check_correctness(S, 'periodic', 'periodic')
|
||||
|
||||
def test_three_points(self):
|
||||
# gh-11758: Fails computing a_m2_m1
|
||||
# In this case, s (the first derivatives) can be found manually by solving a
# system of 2 linear equations. The solution of that system gives
# s[i] = (h1*m2 + h2*m1) / (h1 + h2), where h1 = x[1] - x[0], h2 = x[2] - x[1],
# m1 = (y[1] - y[0]) / h1, m2 = (y[2] - y[1]) / h2
|
||||
x = np.array([1.0, 2.75, 3.0])
|
||||
y = np.array([1.0, 15.0, 1.0])
|
||||
S = CubicSpline(x, y, bc_type='periodic')
|
||||
self.check_correctness(S, 'periodic', 'periodic')
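# Worked out with the formula above: h1 = 1.75, h2 = 0.25, m1 = 14/1.75 = 8,
# m2 = -14/0.25 = -56, so s = (1.75*(-56) + 0.25*8) / 2 = -48, matching the
# assertion below.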
|
||||
xp_assert_close(S.derivative(1)(x), np.array([-48.0, -48.0, -48.0]))
|
||||
|
||||
def test_periodic_three_points_multidim(self):
|
||||
# make sure one multidimensional interpolator does the same as multiple
|
||||
# one-dimensional interpolators
|
||||
x = np.array([0.0, 1.0, 3.0])
|
||||
y = np.array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
|
||||
S = CubicSpline(x, y, bc_type="periodic")
|
||||
self.check_correctness(S, 'periodic', 'periodic')
|
||||
S0 = CubicSpline(x, y[:, 0], bc_type="periodic")
|
||||
S1 = CubicSpline(x, y[:, 1], bc_type="periodic")
|
||||
q = np.linspace(0, 2, 5)
|
||||
xp_assert_close(S(q)[:, 0], S0(q))
|
||||
xp_assert_close(S(q)[:, 1], S1(q))
|
||||
|
||||
def test_dtypes(self):
|
||||
x = np.array([0, 1, 2, 3], dtype=int)
|
||||
y = np.array([-5, 2, 3, 1], dtype=int)
|
||||
S = CubicSpline(x, y)
|
||||
self.check_correctness(S)
|
||||
|
||||
y = np.array([-1+1j, 0.0, 1-1j, 0.5-1.5j])
|
||||
S = CubicSpline(x, y)
|
||||
self.check_correctness(S)
|
||||
|
||||
S = CubicSpline(x, x ** 3, bc_type=("natural", (1, 2j)))
|
||||
self.check_correctness(S, "natural", (1, 2j))
|
||||
|
||||
y = np.array([-5, 2, 3, 1])
|
||||
S = CubicSpline(x, y, bc_type=[(1, 2 + 0.5j), (2, 0.5 - 1j)])
|
||||
self.check_correctness(S, (1, 2 + 0.5j), (2, 0.5 - 1j))
|
||||
|
||||
def test_small_dx(self):
|
||||
rng = np.random.RandomState(0)
|
||||
x = np.sort(rng.uniform(size=100))
|
||||
y = 1e4 + rng.uniform(size=100)
|
||||
S = CubicSpline(x, y)
|
||||
self.check_correctness(S, tol=1e-13)
|
||||
|
||||
def test_incorrect_inputs(self):
|
||||
x = np.array([1, 2, 3, 4])
|
||||
y = np.array([1, 2, 3, 4])
|
||||
xc = np.array([1 + 1j, 2, 3, 4])
|
||||
xn = np.array([np.nan, 2, 3, 4])
|
||||
xo = np.array([2, 1, 3, 4])
|
||||
yn = np.array([np.nan, 2, 3, 4])
|
||||
y3 = [1, 2, 3]
|
||||
x1 = [1]
|
||||
y1 = [1]
|
||||
|
||||
assert_raises(ValueError, CubicSpline, xc, y)
|
||||
assert_raises(ValueError, CubicSpline, xn, y)
|
||||
assert_raises(ValueError, CubicSpline, x, yn)
|
||||
assert_raises(ValueError, CubicSpline, xo, y)
|
||||
assert_raises(ValueError, CubicSpline, x, y3)
|
||||
assert_raises(ValueError, CubicSpline, x[:, np.newaxis], y)
|
||||
assert_raises(ValueError, CubicSpline, x1, y1)
|
||||
|
||||
wrong_bc = [('periodic', 'clamped'),
|
||||
((2, 0), (3, 10)),
|
||||
((1, 0), ),
|
||||
(0., 0.),
|
||||
'not-a-typo']
|
||||
|
||||
for bc_type in wrong_bc:
|
||||
assert_raises(ValueError, CubicSpline, x, y, 0, bc_type, True)
|
||||
|
||||
# Shapes mismatch when giving arbitrary derivative values:
|
||||
Y = np.c_[y, y]
|
||||
bc1 = ('clamped', (1, 0))
|
||||
bc2 = ('clamped', (1, [0, 0, 0]))
|
||||
bc3 = ('clamped', (1, [[0, 0]]))
|
||||
assert_raises(ValueError, CubicSpline, x, Y, 0, bc1, True)
|
||||
assert_raises(ValueError, CubicSpline, x, Y, 0, bc2, True)
|
||||
assert_raises(ValueError, CubicSpline, x, Y, 0, bc3, True)
|
||||
|
||||
# For the periodic condition, y[-1] must equal y[0]:
|
||||
assert_raises(ValueError, CubicSpline, x, y, 0, 'periodic', True)
|
||||
|
||||
|
||||
def test_CubicHermiteSpline_correctness():
|
||||
x = [0, 2, 7]
|
||||
y = [-1, 2, 3]
|
||||
dydx = [0, 3, 7]
|
||||
s = CubicHermiteSpline(x, y, dydx)
|
||||
xp_assert_close(s(x), y, check_shape=False, check_dtype=False, rtol=1e-15)
|
||||
xp_assert_close(s(x, 1), dydx, check_shape=False, check_dtype=False, rtol=1e-15)
|
||||
|
||||
|
||||
def test_CubicHermiteSpline_error_handling():
|
||||
x = [1, 2, 3]
|
||||
y = [0, 3, 5]
|
||||
dydx = [1, -1, 2, 3]
|
||||
assert_raises(ValueError, CubicHermiteSpline, x, y, dydx)
|
||||
|
||||
dydx_with_nan = [1, 0, np.nan]
|
||||
assert_raises(ValueError, CubicHermiteSpline, x, y, dydx_with_nan)
|
||||
|
||||
|
||||
def test_roots_extrapolate_gh_11185():
|
||||
x = np.array([0.001, 0.002])
|
||||
y = np.array([1.66066935e-06, 1.10410807e-06])
|
||||
dy = np.array([-1.60061854, -1.600619])
|
||||
p = CubicHermiteSpline(x, y, dy)
|
||||
|
||||
# roots(extrapolate=True) for a polynomial with a single interval
|
||||
# should return all three real roots
|
||||
r = p.roots(extrapolate=True)
|
||||
assert p.c.shape[1] == 1
|
||||
assert r.size == 3
|
||||
|
||||
|
||||
class TestZeroSizeArrays:
|
||||
# regression tests for gh-17241 : CubicSpline et al must not segfault
|
||||
# when y.size == 0
|
||||
# The two methods below are _almost_ the same, but not quite:
|
||||
# one is for objects which have the `bc_type` argument (CubicSpline)
|
||||
# and the other one is for those which do not (Pchip, Akima1D)
|
||||
|
||||
@pytest.mark.parametrize('y', [np.zeros((10, 0, 5)),
|
||||
np.zeros((10, 5, 0))])
|
||||
@pytest.mark.parametrize('bc_type',
|
||||
['not-a-knot', 'periodic', 'natural', 'clamped'])
|
||||
@pytest.mark.parametrize('axis', [0, 1, 2])
|
||||
@pytest.mark.parametrize('cls', [make_interp_spline, CubicSpline])
|
||||
def test_zero_size(self, cls, y, bc_type, axis):
|
||||
x = np.arange(10)
|
||||
xval = np.arange(3)
|
||||
|
||||
obj = cls(x, y, bc_type=bc_type)
|
||||
assert obj(xval).size == 0
|
||||
assert obj(xval).shape == xval.shape + y.shape[1:]
|
||||
|
||||
# Also check with an explicit non-default axis
|
||||
yt = np.moveaxis(y, 0, axis) # (10, 0, 5) --> (0, 10, 5) if axis=1 etc
|
||||
|
||||
obj = cls(x, yt, bc_type=bc_type, axis=axis)
|
||||
sh = yt.shape[:axis] + (xval.size, ) + yt.shape[axis+1:]
|
||||
assert obj(xval).size == 0
|
||||
assert obj(xval).shape == sh
|
||||
|
||||
@pytest.mark.parametrize('y', [np.zeros((10, 0, 5)),
|
||||
np.zeros((10, 5, 0))])
|
||||
@pytest.mark.parametrize('axis', [0, 1, 2])
|
||||
@pytest.mark.parametrize('cls', [PchipInterpolator, Akima1DInterpolator])
|
||||
def test_zero_size_2(self, cls, y, axis):
|
||||
x = np.arange(10)
|
||||
xval = np.arange(3)
|
||||
|
||||
obj = cls(x, y)
|
||||
assert obj(xval).size == 0
|
||||
assert obj(xval).shape == xval.shape + y.shape[1:]
|
||||
|
||||
# Also check with an explicit non-default axis
|
||||
yt = np.moveaxis(y, 0, axis) # (10, 0, 5) --> (0, 10, 5) if axis=1 etc
|
||||
|
||||
obj = cls(x, yt, axis=axis)
|
||||
sh = yt.shape[:axis] + (xval.size, ) + yt.shape[axis+1:]
|
||||
assert obj(xval).size == 0
|
||||
assert obj(xval).shape == sh
|
@@ -0,0 +1,246 @@
|
|||
# Created by John Travers, Robert Hetland, 2007
|
||||
""" Test functions for rbf module """
|
||||
|
||||
import numpy as np
|
||||
|
||||
from scipy._lib._array_api import assert_array_almost_equal, assert_almost_equal
|
||||
|
||||
from numpy import linspace, sin, cos, exp, allclose
|
||||
from scipy.interpolate._rbf import Rbf
|
||||
from scipy._lib._testutils import _run_concurrent_barrier
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
|
||||
'cubic', 'quintic', 'thin-plate', 'linear')
|
||||
|
||||
|
||||
def check_rbf1d_interpolation(function):
|
||||
# Check that the Rbf function interpolates through the nodes (1D)
|
||||
x = linspace(0,10,9)
|
||||
y = sin(x)
|
||||
rbf = Rbf(x, y, function=function)
|
||||
yi = rbf(x)
|
||||
assert_array_almost_equal(y, yi)
|
||||
assert_almost_equal(rbf(float(x[0])), y[0], check_0d=False)
|
||||
|
||||
|
||||
def check_rbf2d_interpolation(function):
|
||||
# Check that the Rbf function interpolates through the nodes (2D).
|
||||
rng = np.random.RandomState(1234)
|
||||
x = rng.rand(50,1)*4-2
|
||||
y = rng.rand(50,1)*4-2
|
||||
z = x*exp(-x**2-1j*y**2)
|
||||
rbf = Rbf(x, y, z, epsilon=2, function=function)
|
||||
zi = rbf(x, y)
|
||||
zi.shape = x.shape
|
||||
assert_array_almost_equal(z, zi)
|
||||
|
||||
|
||||
def check_rbf3d_interpolation(function):
|
||||
# Check that the Rbf function interpolates through the nodes (3D).
|
||||
rng = np.random.RandomState(1234)
|
||||
x = rng.rand(50, 1)*4 - 2
|
||||
y = rng.rand(50, 1)*4 - 2
|
||||
z = rng.rand(50, 1)*4 - 2
|
||||
d = x*exp(-x**2 - y**2)
|
||||
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
|
||||
di = rbf(x, y, z)
|
||||
di.shape = x.shape
|
||||
assert_array_almost_equal(di, d)
|
||||
|
||||
|
||||
def test_rbf_interpolation():
|
||||
for function in FUNCTIONS:
|
||||
check_rbf1d_interpolation(function)
|
||||
check_rbf2d_interpolation(function)
|
||||
check_rbf3d_interpolation(function)
|
||||
|
||||
|
||||
def check_2drbf1d_interpolation(function):
|
||||
# Check that the 2-D Rbf function interpolates through the nodes (1D)
|
||||
x = linspace(0, 10, 9)
|
||||
y0 = sin(x)
|
||||
y1 = cos(x)
|
||||
y = np.vstack([y0, y1]).T
|
||||
rbf = Rbf(x, y, function=function, mode='N-D')
|
||||
yi = rbf(x)
|
||||
assert_array_almost_equal(y, yi)
|
||||
assert_almost_equal(rbf(float(x[0])), y[0])
|
||||
|
||||
|
||||
def check_2drbf2d_interpolation(function):
|
||||
# Check that the 2-D Rbf function interpolates through the nodes (2D).
|
||||
rng = np.random.RandomState(1234)
|
||||
x = rng.rand(50, ) * 4 - 2
|
||||
y = rng.rand(50, ) * 4 - 2
|
||||
z0 = x * exp(-x ** 2 - 1j * y ** 2)
|
||||
z1 = y * exp(-y ** 2 - 1j * x ** 2)
|
||||
z = np.vstack([z0, z1]).T
|
||||
rbf = Rbf(x, y, z, epsilon=2, function=function, mode='N-D')
|
||||
zi = rbf(x, y)
|
||||
zi.shape = z.shape
|
||||
assert_array_almost_equal(z, zi)
|
||||
|
||||
|
||||
def check_2drbf3d_interpolation(function):
|
||||
# Check that the 2-D Rbf function interpolates through the nodes (3D).
|
||||
rng = np.random.RandomState(1234)
|
||||
x = rng.rand(50, ) * 4 - 2
|
||||
y = rng.rand(50, ) * 4 - 2
|
||||
z = rng.rand(50, ) * 4 - 2
|
||||
d0 = x * exp(-x ** 2 - y ** 2)
|
||||
d1 = y * exp(-y ** 2 - x ** 2)
|
||||
d = np.vstack([d0, d1]).T
|
||||
rbf = Rbf(x, y, z, d, epsilon=2, function=function, mode='N-D')
|
||||
di = rbf(x, y, z)
|
||||
di.shape = d.shape
|
||||
assert_array_almost_equal(di, d)
|
||||
|
||||
|
||||
def test_2drbf_interpolation():
|
||||
for function in FUNCTIONS:
|
||||
check_2drbf1d_interpolation(function)
|
||||
check_2drbf2d_interpolation(function)
|
||||
check_2drbf3d_interpolation(function)
|
||||
|
||||
|
||||
def check_rbf1d_regularity(function, atol):
|
||||
# Check that the Rbf function approximates a smooth function well away
|
||||
# from the nodes.
|
||||
x = linspace(0, 10, 9)
|
||||
y = sin(x)
|
||||
rbf = Rbf(x, y, function=function)
|
||||
xi = linspace(0, 10, 100)
|
||||
yi = rbf(xi)
|
||||
msg = f"abs-diff: {abs(yi - sin(xi)).max():f}"
|
||||
assert allclose(yi, sin(xi), atol=atol), msg
|
||||
|
||||
|
||||
def test_rbf_regularity():
|
||||
tolerances = {
|
||||
'multiquadric': 0.1,
|
||||
'inverse multiquadric': 0.15,
|
||||
'gaussian': 0.15,
|
||||
'cubic': 0.15,
|
||||
'quintic': 0.1,
|
||||
'thin-plate': 0.1,
|
||||
'linear': 0.2
|
||||
}
|
||||
for function in FUNCTIONS:
|
||||
check_rbf1d_regularity(function, tolerances.get(function, 1e-2))
|
||||
|
||||
|
||||
def check_2drbf1d_regularity(function, atol):
|
||||
# Check that the 2-D Rbf function approximates a smooth function well away
|
||||
# from the nodes.
|
||||
x = linspace(0, 10, 9)
|
||||
y0 = sin(x)
|
||||
y1 = cos(x)
|
||||
y = np.vstack([y0, y1]).T
|
||||
rbf = Rbf(x, y, function=function, mode='N-D')
|
||||
xi = linspace(0, 10, 100)
|
||||
yi = rbf(xi)
|
||||
msg = f"abs-diff: {abs(yi - np.vstack([sin(xi), cos(xi)]).T).max():f}"
|
||||
assert allclose(yi, np.vstack([sin(xi), cos(xi)]).T, atol=atol), msg
|
||||
|
||||
|
||||
def test_2drbf_regularity():
|
||||
tolerances = {
|
||||
'multiquadric': 0.1,
|
||||
'inverse multiquadric': 0.15,
|
||||
'gaussian': 0.15,
|
||||
'cubic': 0.15,
|
||||
'quintic': 0.1,
|
||||
'thin-plate': 0.15,
|
||||
'linear': 0.2
|
||||
}
|
||||
for function in FUNCTIONS:
|
||||
check_2drbf1d_regularity(function, tolerances.get(function, 1e-2))
|
||||
|
||||
|
||||
def check_rbf1d_stability(function):
|
||||
# Check that the Rbf function with default epsilon is not subject
|
||||
# to overshoot. Regression for issue #4523.
|
||||
#
|
||||
# Generate some data (fixed random seed hence deterministic)
|
||||
rng = np.random.RandomState(1234)
|
||||
x = np.linspace(0, 10, 50)
|
||||
z = x + 4.0 * rng.randn(len(x))
|
||||
|
||||
rbf = Rbf(x, z, function=function)
|
||||
xi = np.linspace(0, 10, 1000)
|
||||
yi = rbf(xi)
|
||||
|
||||
# the interpolant's deviation from the linear trend y = x should not exceed the
# scatter of the data by more than ~10%, i.e. there are no spikes
|
||||
assert np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1
|
||||
|
||||
def test_rbf_stability():
|
||||
for function in FUNCTIONS:
|
||||
check_rbf1d_stability(function)
|
||||
|
||||
|
||||
def test_default_construction():
|
||||
# Check that the Rbf class can be constructed with the default
|
||||
# multiquadric basis function. Regression test for ticket #1228.
|
||||
x = linspace(0,10,9)
|
||||
y = sin(x)
|
||||
rbf = Rbf(x, y)
|
||||
yi = rbf(x)
|
||||
assert_array_almost_equal(y, yi)
|
||||
|
||||
|
||||
def test_function_is_callable():
|
||||
# Check that the Rbf class can be constructed with function=callable.
|
||||
x = linspace(0,10,9)
|
||||
y = sin(x)
|
||||
def linfunc(x):
|
||||
return x
|
||||
rbf = Rbf(x, y, function=linfunc)
|
||||
yi = rbf(x)
|
||||
assert_array_almost_equal(y, yi)
|
||||
|
||||
|
||||
def test_two_arg_function_is_callable():
|
||||
# Check that the Rbf class can be constructed with a two argument
|
||||
# function=callable.
|
||||
def _func(self, r):
|
||||
return self.epsilon + r
|
||||
|
||||
x = linspace(0,10,9)
|
||||
y = sin(x)
|
||||
rbf = Rbf(x, y, function=_func)
|
||||
yi = rbf(x)
|
||||
assert_array_almost_equal(y, yi)
|
||||
|
||||
|
||||
def test_rbf_epsilon_none():
|
||||
x = linspace(0, 10, 9)
|
||||
y = sin(x)
|
||||
Rbf(x, y, epsilon=None)
|
||||
|
||||
|
||||
def test_rbf_epsilon_none_collinear():
|
||||
# Check that collinear points in one dimension don't cause an error
|
||||
# due to epsilon = 0
|
||||
x = [1, 2, 3]
|
||||
y = [4, 4, 4]
|
||||
z = [5, 6, 7]
|
||||
rbf = Rbf(x, y, z, epsilon=None)
|
||||
assert rbf.epsilon > 0
|
||||
|
||||
|
||||
@pytest.mark.thread_unsafe
|
||||
def test_rbf_concurrency():
|
||||
x = linspace(0, 10, 100)
|
||||
y0 = sin(x)
|
||||
y1 = cos(x)
|
||||
y = np.vstack([y0, y1]).T
|
||||
rbf = Rbf(x, y, mode='N-D')
|
||||
|
||||
def worker_fn(_, interp, xp):
|
||||
interp(xp)
|
||||
|
||||
_run_concurrent_barrier(10, worker_fn, rbf, x)
@@ -0,0 +1,534 @@
|
|||
import pickle
|
||||
import pytest
|
||||
import numpy as np
|
||||
from numpy.linalg import LinAlgError
|
||||
from scipy._lib._array_api import xp_assert_close
|
||||
from scipy.stats.qmc import Halton
|
||||
from scipy.spatial import cKDTree # type: ignore[attr-defined]
|
||||
from scipy.interpolate._rbfinterp import (
|
||||
_AVAILABLE, _SCALE_INVARIANT, _NAME_TO_MIN_DEGREE, _monomial_powers,
|
||||
RBFInterpolator
|
||||
)
|
||||
from scipy.interpolate import _rbfinterp_pythran
|
||||
from scipy._lib._testutils import _run_concurrent_barrier
|
||||
|
||||
|
||||
def _vandermonde(x, degree):
|
||||
# Returns a matrix of monomials that span polynomials with the specified
|
||||
# degree evaluated at x.
|
||||
powers = _monomial_powers(x.shape[1], degree)
|
||||
return _rbfinterp_pythran._polynomial_matrix(x, powers)
|
||||
|
||||
|
||||
def _1d_test_function(x):
|
||||
# Test function used in Wahba's "Spline Models for Observational Data".
|
||||
# domain ~= (0, 3), range ~= (-1.0, 0.2)
|
||||
x = x[:, 0]
|
||||
y = 4.26*(np.exp(-x) - 4*np.exp(-2*x) + 3*np.exp(-3*x))
|
||||
return y
|
||||
|
||||
|
||||
def _2d_test_function(x):
|
||||
# Franke's test function.
|
||||
# domain ~= (0, 1) X (0, 1), range ~= (0.0, 1.2)
|
||||
x1, x2 = x[:, 0], x[:, 1]
|
||||
term1 = 0.75 * np.exp(-(9*x1-2)**2/4 - (9*x2-2)**2/4)
|
||||
term2 = 0.75 * np.exp(-(9*x1+1)**2/49 - (9*x2+1)/10)
|
||||
term3 = 0.5 * np.exp(-(9*x1-7)**2/4 - (9*x2-3)**2/4)
|
||||
term4 = -0.2 * np.exp(-(9*x1-4)**2 - (9*x2-7)**2)
|
||||
y = term1 + term2 + term3 + term4
|
||||
return y
|
||||
|
||||
|
||||
def _is_conditionally_positive_definite(kernel, m):
|
||||
# Tests whether the kernel is conditionally positive definite of order m.
|
||||
# See chapter 7 of Fasshauer's "Meshfree Approximation Methods with
|
||||
# MATLAB".
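# A kernel is conditionally positive definite of order m if c.T @ A @ c > 0 for
# every nonzero vector c satisfying P.T @ c = 0, where P is the matrix of
# monomials of degree < m evaluated at the sample points; that is the property
# verified below.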
|
||||
nx = 10
|
||||
ntests = 100
|
||||
for ndim in [1, 2, 3, 4, 5]:
|
||||
# Generate sample points with a Halton sequence to avoid samples that
|
||||
# are too close to each other, which can make the matrix singular.
|
||||
seq = Halton(ndim, scramble=False, seed=np.random.RandomState())
|
||||
for _ in range(ntests):
|
||||
x = 2*seq.random(nx) - 1
|
||||
A = _rbfinterp_pythran._kernel_matrix(x, kernel)
|
||||
P = _vandermonde(x, m - 1)
|
||||
Q, R = np.linalg.qr(P, mode='complete')
|
||||
# Q2 forms a basis spanning the space where P.T.dot(x) = 0. Project
|
||||
# A onto this space, and then see if it is positive definite using
|
||||
# the Cholesky decomposition. If not, then the kernel is not c.p.d.
|
||||
# of order m.
|
||||
Q2 = Q[:, P.shape[1]:]
|
||||
B = Q2.T.dot(A).dot(Q2)
|
||||
try:
|
||||
np.linalg.cholesky(B)
|
||||
except np.linalg.LinAlgError:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
# Sorting the parametrize arguments is necessary to avoid a parallelization
|
||||
# issue described here: https://github.com/pytest-dev/pytest-xdist/issues/432.
|
||||
@pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
|
||||
def test_conditionally_positive_definite(kernel):
|
||||
# Test if each kernel in _AVAILABLE is conditionally positive definite of
|
||||
# order m, where m comes from _NAME_TO_MIN_DEGREE. This is a necessary
|
||||
# condition for the smoothed RBF interpolant to be well-posed in general.
|
||||
m = _NAME_TO_MIN_DEGREE.get(kernel, -1) + 1
|
||||
assert _is_conditionally_positive_definite(kernel, m)
|
||||
|
||||
|
||||
class _TestRBFInterpolator:
|
||||
@pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
|
||||
def test_scale_invariance_1d(self, kernel):
|
||||
# Verify that the functions in _SCALE_INVARIANT are insensitive to the
|
||||
# shape parameter (when smoothing == 0) in 1d.
|
||||
        seq = Halton(1, scramble=False, seed=np.random.RandomState())
        x = 3*seq.random(50)
        y = _1d_test_function(x)
        xitp = 3*seq.random(50)
        yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
        yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
        xp_assert_close(yitp1, yitp2, atol=1e-8)

    @pytest.mark.parametrize('kernel', sorted(_SCALE_INVARIANT))
    def test_scale_invariance_2d(self, kernel):
        # Verify that the functions in _SCALE_INVARIANT are insensitive to the
        # shape parameter (when smoothing == 0) in 2d.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())
        x = seq.random(100)
        y = _2d_test_function(x)
        xitp = seq.random(100)
        yitp1 = self.build(x, y, epsilon=1.0, kernel=kernel)(xitp)
        yitp2 = self.build(x, y, epsilon=2.0, kernel=kernel)(xitp)
        xp_assert_close(yitp1, yitp2, atol=1e-8)

    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
    def test_extreme_domains(self, kernel):
        # Make sure the interpolant remains numerically stable for very
        # large/small domains.
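        # Shifting and scaling the observation points should not change the
        # result, provided `epsilon` is divided by the same scale for kernels
        # that are not scale invariant (so that epsilon times the pairwise
        # distances is preserved).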
        seq = Halton(2, scramble=False, seed=np.random.RandomState())
        scale = 1e50
        shift = 1e55

        x = seq.random(100)
        y = _2d_test_function(x)
        xitp = seq.random(100)

        if kernel in _SCALE_INVARIANT:
            yitp1 = self.build(x, y, kernel=kernel)(xitp)
            yitp2 = self.build(
                x*scale + shift, y,
                kernel=kernel
            )(xitp*scale + shift)
        else:
            yitp1 = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)
            yitp2 = self.build(
                x*scale + shift, y,
                epsilon=5.0/scale,
                kernel=kernel
            )(xitp*scale + shift)

        xp_assert_close(yitp1, yitp2, atol=1e-8)

    def test_polynomial_reproduction(self):
        # If the observed data comes from a polynomial, then the interpolant
        # should be able to reproduce the polynomial exactly, provided that
        # `degree` is sufficiently high.
        rng = np.random.RandomState(0)
        seq = Halton(2, scramble=False, seed=rng)
        degree = 3

        x = seq.random(50)
        xitp = seq.random(50)

        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)

        poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])

        y = P.dot(poly_coeffs)
        yitp1 = Pitp.dot(poly_coeffs)
        yitp2 = self.build(x, y, degree=degree)(xitp)

        xp_assert_close(yitp1, yitp2, atol=1e-8)

    @pytest.mark.slow
    def test_chunking(self, monkeypatch):
        # Same polynomial-reproduction setup as above, but evaluated at many
        # points so that the chunked evaluation path of the interpolant is
        # exercised.
        rng = np.random.RandomState(0)
        seq = Halton(2, scramble=False, seed=rng)
        degree = 3

        largeN = 1000 + 33
        # Use many evaluation points so that more than one chunk is needed.
        x = seq.random(50)
        xitp = seq.random(largeN)

        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)

        poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])

        y = P.dot(poly_coeffs)
        yitp1 = Pitp.dot(poly_coeffs)
        interp = self.build(x, y, degree=degree)
        ce_real = interp._chunk_evaluator

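        # Wrap the private `_chunk_evaluator` so every call runs with a tiny
        # memory budget, forcing evaluation to proceed in many small chunks.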
        def _chunk_evaluator(*args, **kwargs):
            kwargs.update(memory_budget=100)
            return ce_real(*args, **kwargs)

        monkeypatch.setattr(interp, '_chunk_evaluator', _chunk_evaluator)
        yitp2 = interp(xitp)
        xp_assert_close(yitp1, yitp2, atol=1e-8)

    def test_vector_data(self):
        # Make sure interpolating a vector field is the same as interpolating
        # each component separately.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())

        x = seq.random(100)
        xitp = seq.random(100)

        y = np.array([_2d_test_function(x),
                      _2d_test_function(x[:, ::-1])]).T

        yitp1 = self.build(x, y)(xitp)
        yitp2 = self.build(x, y[:, 0])(xitp)
        yitp3 = self.build(x, y[:, 1])(xitp)

        xp_assert_close(yitp1[:, 0], yitp2)
        xp_assert_close(yitp1[:, 1], yitp3)

    def test_complex_data(self):
        # Interpolating complex input should be the same as interpolating the
        # real and imaginary components separately.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())

        x = seq.random(100)
        xitp = seq.random(100)

        y = _2d_test_function(x) + 1j*_2d_test_function(x[:, ::-1])

        yitp1 = self.build(x, y)(xitp)
        yitp2 = self.build(x, y.real)(xitp)
        yitp3 = self.build(x, y.imag)(xitp)

        xp_assert_close(yitp1.real, yitp2)
        xp_assert_close(yitp1.imag, yitp3)

    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
    def test_interpolation_misfit_1d(self, kernel):
        # Make sure that each kernel, with its default `degree` and an
        # appropriate `epsilon`, does a good job at interpolation in 1d.
        seq = Halton(1, scramble=False, seed=np.random.RandomState())

        x = 3*seq.random(50)
        xitp = 3*seq.random(50)

        y = _1d_test_function(x)
        ytrue = _1d_test_function(xitp)
        yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)

        mse = np.mean((yitp - ytrue)**2)
        assert mse < 1.0e-4

    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
    def test_interpolation_misfit_2d(self, kernel):
        # Make sure that each kernel, with its default `degree` and an
        # appropriate `epsilon`, does a good job at interpolation in 2d.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())

        x = seq.random(100)
        xitp = seq.random(100)

        y = _2d_test_function(x)
        ytrue = _2d_test_function(xitp)
        yitp = self.build(x, y, epsilon=5.0, kernel=kernel)(xitp)

        mse = np.mean((yitp - ytrue)**2)
        assert mse < 2.0e-4

    @pytest.mark.parametrize('kernel', sorted(_AVAILABLE))
    def test_smoothing_misfit(self, kernel):
        # Make sure we can find a smoothing parameter for each kernel that
        # removes a sufficient amount of noise.
        rng = np.random.RandomState(0)
        seq = Halton(1, scramble=False, seed=rng)

        noise = 0.2
        rmse_tol = 0.1
        smoothing_range = 10**np.linspace(-4, 1, 20)

        x = 3*seq.random(100)
        y = _1d_test_function(x) + rng.normal(0.0, noise, (100,))
        ytrue = _1d_test_function(x)
        rmse_within_tol = False
        for smoothing in smoothing_range:
            ysmooth = self.build(
                x, y,
                epsilon=1.0,
                smoothing=smoothing,
                kernel=kernel)(x)
            rmse = np.sqrt(np.mean((ysmooth - ytrue)**2))
            if rmse < rmse_tol:
                rmse_within_tol = True
                break

        assert rmse_within_tol

    def test_array_smoothing(self):
        # Test using an array for `smoothing` to give less weight to a known
        # outlier.
        rng = np.random.RandomState(0)
        seq = Halton(1, scramble=False, seed=rng)
        degree = 2

        x = seq.random(50)
        P = _vandermonde(x, degree)
        poly_coeffs = rng.normal(0.0, 1.0, P.shape[1])
        y = P.dot(poly_coeffs)
        y_with_outlier = np.copy(y)
        y_with_outlier[10] += 1.0
        smoothing = np.zeros((50,))
        smoothing[10] = 1000.0
        yitp = self.build(x, y_with_outlier, smoothing=smoothing)(x)
        # Should be able to reproduce the uncorrupted data almost exactly.
        xp_assert_close(yitp, y, atol=1e-4)

    def test_inconsistent_x_dimensions_error(self):
        # ValueError should be raised if the observation points and evaluation
        # points have a different number of dimensions.
        y = Halton(2, scramble=False, seed=np.random.RandomState()).random(10)
        d = _2d_test_function(y)
        x = Halton(1, scramble=False, seed=np.random.RandomState()).random(10)
        match = 'Expected the second axis of `x`'
        with pytest.raises(ValueError, match=match):
            self.build(y, d)(x)

    def test_inconsistent_d_length_error(self):
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(1)
        match = 'Expected the first axis of `d`'
        with pytest.raises(ValueError, match=match):
            self.build(y, d)

    def test_y_not_2d_error(self):
        y = np.linspace(0, 1, 5)
        d = np.zeros(5)
        match = '`y` must be a 2-dimensional array.'
        with pytest.raises(ValueError, match=match):
            self.build(y, d)

    def test_inconsistent_smoothing_length_error(self):
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        smoothing = np.ones(1)
        match = 'Expected `smoothing` to be'
        with pytest.raises(ValueError, match=match):
            self.build(y, d, smoothing=smoothing)

    def test_invalid_kernel_name_error(self):
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        match = '`kernel` must be one of'
        with pytest.raises(ValueError, match=match):
            self.build(y, d, kernel='test')

    def test_epsilon_not_specified_error(self):
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        for kernel in _AVAILABLE:
            if kernel in _SCALE_INVARIANT:
                continue

            match = '`epsilon` must be specified'
            with pytest.raises(ValueError, match=match):
                self.build(y, d, kernel=kernel)

    def test_x_not_2d_error(self):
        y = np.linspace(0, 1, 5)[:, None]
        x = np.linspace(0, 1, 5)
        d = np.zeros(5)
        match = '`x` must be a 2-dimensional array.'
        with pytest.raises(ValueError, match=match):
            self.build(y, d)(x)

    def test_not_enough_observations_error(self):
        y = np.linspace(0, 1, 1)[:, None]
        d = np.zeros(1)
        match = 'At least 2 data points are required'
        with pytest.raises(ValueError, match=match):
            self.build(y, d, kernel='thin_plate_spline')

    @pytest.mark.thread_unsafe
    def test_degree_warning(self):
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        for kernel, deg in _NAME_TO_MIN_DEGREE.items():
            # Only test kernels whose minimum degree is not 0.
            if deg >= 1:
                match = f'`degree` should not be below {deg}'
                with pytest.warns(Warning, match=match):
                    self.build(y, d, epsilon=1.0, kernel=kernel, degree=deg-1)

    def test_minus_one_degree(self):
        # Make sure a degree of -1 is accepted without any warning.
        y = np.linspace(0, 1, 5)[:, None]
        d = np.zeros(5)
        for kernel, _ in _NAME_TO_MIN_DEGREE.items():
            self.build(y, d, epsilon=1.0, kernel=kernel, degree=-1)

    def test_rank_error(self):
        # An error should be raised when `kernel` is "thin_plate_spline" and
        # observations are 2-D and collinear.
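        # (With collinear points the appended polynomial/Vandermonde block
        # loses full column rank, so the augmented interpolation system is
        # singular.)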
        y = np.array([[2.0, 0.0], [1.0, 0.0], [0.0, 0.0]])
        d = np.array([0.0, 0.0, 0.0])
        match = 'does not have full column rank'
        with pytest.raises(LinAlgError, match=match):
            self.build(y, d, kernel='thin_plate_spline')(y)

    def test_single_point(self):
        # Make sure interpolation still works with only one point (in 1, 2,
        # and 3 dimensions).
        for dim in [1, 2, 3]:
            y = np.zeros((1, dim))
            d = np.ones((1,))
            f = self.build(y, d, kernel='linear')(y)
            xp_assert_close(d, f)

    def test_pickleable(self):
        # Make sure we can pickle and unpickle the interpolant without any
        # changes in the behavior.
        seq = Halton(1, scramble=False, seed=np.random.RandomState(2305982309))

        x = 3*seq.random(50)
        xitp = 3*seq.random(50)

        y = _1d_test_function(x)

        interp = self.build(x, y)

        yitp1 = interp(xitp)
        yitp2 = pickle.loads(pickle.dumps(interp))(xitp)

        xp_assert_close(yitp1, yitp2, atol=1e-16)


class TestRBFInterpolatorNeighborsNone(_TestRBFInterpolator):
    def build(self, *args, **kwargs):
        return RBFInterpolator(*args, **kwargs)

    def test_smoothing_limit_1d(self):
        # For large smoothing parameters, the interpolant should approach a
        # least squares fit of a polynomial with the specified degree.
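        # (As the smoothing parameter grows, the RBF coefficients are driven
        # toward zero and only the appended polynomial term remains, which is
        # determined by least squares.)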
        seq = Halton(1, scramble=False, seed=np.random.RandomState())

        degree = 3
        smoothing = 1e8

        x = 3*seq.random(50)
        xitp = 3*seq.random(50)

        y = _1d_test_function(x)

        yitp1 = self.build(
            x, y,
            degree=degree,
            smoothing=smoothing
        )(xitp)

        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)
        yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])

        xp_assert_close(yitp1, yitp2, atol=1e-8)

    def test_smoothing_limit_2d(self):
        # For large smoothing parameters, the interpolant should approach a
        # least squares fit of a polynomial with the specified degree.
        seq = Halton(2, scramble=False, seed=np.random.RandomState())

        degree = 3
        smoothing = 1e8

        x = seq.random(100)
        xitp = seq.random(100)

        y = _2d_test_function(x)

        yitp1 = self.build(
            x, y,
            degree=degree,
            smoothing=smoothing
        )(xitp)

        P = _vandermonde(x, degree)
        Pitp = _vandermonde(xitp, degree)
        yitp2 = Pitp.dot(np.linalg.lstsq(P, y, rcond=None)[0])

        xp_assert_close(yitp1, yitp2, atol=1e-8)


class TestRBFInterpolatorNeighbors20(_TestRBFInterpolator):
    # RBFInterpolator using 20 nearest neighbors.
    def build(self, *args, **kwargs):
        return RBFInterpolator(*args, **kwargs, neighbors=20)

    def test_equivalent_to_rbf_interpolator(self):
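        # With neighbors=20, each evaluation point should use a local
        # interpolant fit only to its 20 nearest observation points; the
        # cKDTree loop below reproduces that construction directly.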
        seq = Halton(2, scramble=False, seed=np.random.RandomState())

        x = seq.random(100)
        xitp = seq.random(100)

        y = _2d_test_function(x)

        yitp1 = self.build(x, y)(xitp)

        yitp2 = []
        tree = cKDTree(x)
        for xi in xitp:
            _, nbr = tree.query(xi, 20)
            yitp2.append(RBFInterpolator(x[nbr], y[nbr])(xi[None])[0])

        xp_assert_close(yitp1, yitp2, atol=1e-8)

    def test_concurrency(self):
        # Check that no segfaults appear with concurrent access to
        # RBFInterpolator.
        seq = Halton(2, scramble=False, seed=np.random.RandomState(0))
        x = seq.random(100)
        xitp = seq.random(100)

        y = _2d_test_function(x)

        interp = self.build(x, y)

        def worker_fn(_, interp, xp):
            interp(xp)

        _run_concurrent_barrier(10, worker_fn, interp, xitp)


class TestRBFInterpolatorNeighborsInf(TestRBFInterpolatorNeighborsNone):
    # RBFInterpolator using neighbors=np.inf. This should give exactly the same
    # results as neighbors=None, but it will be slower.
    def build(self, *args, **kwargs):
        return RBFInterpolator(*args, **kwargs, neighbors=np.inf)

    def test_equivalent_to_rbf_interpolator(self):
        seq = Halton(1, scramble=False, seed=np.random.RandomState())

        x = 3*seq.random(50)
        xitp = 3*seq.random(50)

        y = _1d_test_function(x)
        yitp1 = self.build(x, y)(xitp)
        yitp2 = RBFInterpolator(x, y)(xitp)

        xp_assert_close(yitp1, yitp2, atol=1e-8)