up follow livre
This commit is contained in:
parent
b4b4398bb0
commit
3a7a3849ae
12242 changed files with 2564461 additions and 6914 deletions
Binary files not shown.
@@ -0,0 +1,311 @@
"""Helpers to utilize existing stft / istft tests for testing `ShortTimeFFT`.

This module provides the functions stft_compare() and istft_compare(), which
compare the output of the existing (i)stft() with the ShortTimeFFT-based
_(i)stft_wrapper() implementations in this module.

For testing, add the following imports to the file ``tests/test_spectral.py``::

    from ._scipy_spectral_test_shim import stft_compare as stft
    from ._scipy_spectral_test_shim import istft_compare as istft

and remove the existing imports of stft and istft.

The idea of these wrappers is not to provide a backward-compatible interface
but to demonstrate that the ShortTimeFFT implementation is at least as capable
as the existing one and delivers comparable results. Furthermore, the
wrappers highlight the different philosophies of the implementations,
especially in the border handling.
"""
import platform
from typing import cast, Literal

import numpy as np
from numpy.testing import assert_allclose

from scipy.signal import ShortTimeFFT
from scipy.signal import get_window, stft, istft
from scipy.signal._arraytools import const_ext, even_ext, odd_ext, zero_ext
from scipy.signal._short_time_fft import FFT_MODE_TYPE
from scipy.signal._spectral_py import _triage_segments


def _stft_wrapper(x, fs=1.0, window='hann', nperseg=256, noverlap=None,
                  nfft=None, detrend=False, return_onesided=True,
                  boundary='zeros', padded=True, axis=-1, scaling='spectrum'):
    """Wrapper for the SciPy `stft()` function based on `ShortTimeFFT` for
    unit testing.

    Handling the boundary and padding is where `ShortTimeFFT` and `stft()`
    differ in behavior. Parts of `_spectral_helper()` were copied to mimic
    the `stft()` behavior.

    This function is meant to be solely used by `stft_compare()`.
    """
    if scaling not in ('psd', 'spectrum'):  # same errors as in original stft:
        raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!")

    # The following lines are taken from the original _spectral_helper():
    boundary_funcs = {'even': even_ext,
                      'odd': odd_ext,
                      'constant': const_ext,
                      'zeros': zero_ext,
                      None: None}

    if boundary not in boundary_funcs:
        raise ValueError(f"Unknown boundary option '{boundary}', must be one" +
                         f" of: {list(boundary_funcs.keys())}")
    if x.size == 0:
        return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)

    if nperseg is not None:  # if specified by user
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')

    # parse window; if array like, then set nperseg = win.shape
    win, nperseg = _triage_segments(window, nperseg,
                                    input_length=x.shape[axis])

    if nfft is None:
        nfft = nperseg
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg//2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    nstep = nperseg - noverlap
    n = x.shape[axis]

    # Padding occurs after boundary extension, so that the extended signal ends
    # in zeros, instead of introducing an impulse at the end.
    # I.e. if x = [..., 3, 2]
    # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
    # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
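    # (Note added for clarity: the boundary extension referred to above is
    #  done by the *_ext helpers from scipy.signal._arraytools in the
    #  `if boundary is not None:` block right below, while the zero padding
    #  happens later in the `if padded:` block, so the first ordering is the
    #  one actually implemented.)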

    if boundary is not None:
        ext_func = boundary_funcs[boundary]
        # Extend by nperseg//2 in front and back:
        x = ext_func(x, nperseg//2, axis=axis)

    if padded:
        # Pad to integer number of windowed segments
        # I.e., make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
        x = np.moveaxis(x, axis, -1)

        # This is an edge case where ShortTimeFFT returns one more time slice
        # than the SciPy stft(); shorten x to drop that last time slice:
        if n % 2 == 1 and nperseg % 2 == 1 and noverlap % 2 == 1:
            x = x[..., : -1]

        nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
        zeros_shape = list(x.shape[:-1]) + [nadd]
        x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
        x = np.moveaxis(x, -1, axis)

    # ... end original _spectral_helper() code.
    scale_to = {'spectrum': 'magnitude', 'psd': 'psd'}[scaling]

    if np.iscomplexobj(x) and return_onesided:
        return_onesided = False
    # using cast() to make mypy happy:
    fft_mode = cast(FFT_MODE_TYPE, 'onesided' if return_onesided else 'twosided')

    ST = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft,
                      scale_to=scale_to, phase_shift=None)

    k_off = nperseg // 2
    p0 = 0  # ST.lower_border_end[1] + 1
    nn = x.shape[axis] if padded else n+k_off+1
    # number of frames akin to the legacy stft computation
    p1 = (x.shape[axis] - nperseg) // nstep + 1

    detr = None if detrend is False else detrend
    Sxx = ST.stft_detrend(x, detr, p0, p1, k_offset=k_off, axis=axis)
    t = ST.t(nn, 0, p1 - p0, k_offset=0 if boundary is not None else k_off)
    if x.dtype in (np.float32, np.complex64):
        Sxx = Sxx.astype(np.complex64)

    return ST.f, t, Sxx


def _istft_wrapper(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None,
                   nfft=None, input_onesided=True, boundary=True, time_axis=-1,
                   freq_axis=-2, scaling='spectrum') -> \
        tuple[np.ndarray, np.ndarray, tuple[int, int]]:
    """Wrapper for the SciPy `istft()` function based on `ShortTimeFFT` for
    unit testing.

    Note that the option handling is only implemented as far as needed by the
    unit tests. E.g., the case ``nperseg=None`` is not handled.

    This function is meant to be solely used by `istft_compare()`.
    """
    # *** Lines are taken from _spectral_py.istft() ***:
    if Zxx.ndim < 2:
        raise ValueError('Input stft must be at least 2d!')

    if freq_axis == time_axis:
        raise ValueError('Must specify differing time and frequency axes!')

    nseg = Zxx.shape[time_axis]

    if input_onesided:
        # Assume even segment length
        n_default = 2*(Zxx.shape[freq_axis] - 1)
    else:
        n_default = Zxx.shape[freq_axis]

    # Check windowing parameters
    if nperseg is None:
        nperseg = n_default
    else:
        nperseg = int(nperseg)
        if nperseg < 1:
            raise ValueError('nperseg must be a positive integer')

    if nfft is None:
        if input_onesided and (nperseg == n_default + 1):
            # Odd nperseg, no FFT padding
            nfft = nperseg
        else:
            nfft = n_default
    elif nfft < nperseg:
        raise ValueError('nfft must be greater than or equal to nperseg.')
    else:
        nfft = int(nfft)

    if noverlap is None:
        noverlap = nperseg//2
    else:
        noverlap = int(noverlap)
    if noverlap >= nperseg:
        raise ValueError('noverlap must be less than nperseg.')
    nstep = nperseg - noverlap

    # Get window as array
    if isinstance(window, str) or type(window) is tuple:
        win = get_window(window, nperseg)
    else:
        win = np.asarray(window)
        if len(win.shape) != 1:
            raise ValueError('window must be 1-D')
        if win.shape[0] != nperseg:
            raise ValueError(f'window must have length of {nperseg}')

    outputlength = nperseg + (nseg-1)*nstep
    # *** End block of: Taken from _spectral_py.istft() ***

    # Using cast() to make mypy happy:
    fft_mode = cast(FFT_MODE_TYPE, 'onesided' if input_onesided else 'twosided')
    scale_to = cast(Literal['magnitude', 'psd'],
                    {'spectrum': 'magnitude', 'psd': 'psd'}[scaling])

    ST = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft,
                      scale_to=scale_to, phase_shift=None)

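    # Note (added for clarity): k0/k1 below delimit, in ShortTimeFFT sample
    # indices, the span over which the signal is reconstructed.  k0 offsets
    # ST.k_min by half a window, and k1 - k0 equals the legacy istft() output
    # length minus one window length (rounded down to an even sample count).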
    if boundary:
        j = nperseg if nperseg % 2 == 0 else nperseg - 1
        k0 = ST.k_min + nperseg // 2
        k1 = outputlength - j + k0
    else:
        raise NotImplementedError("boundary=False does not make sense with " +
                                  "ShortTimeFFT.istft()!")

    x = ST.istft(Zxx, k0=k0, k1=k1, f_axis=freq_axis, t_axis=time_axis)
    t = np.arange(k1 - k0) * ST.T
    k_hi = ST.upper_border_begin(k1 - k0)[0]
    # using cast() to make mypy happy:
    return t, x, (ST.lower_border_end[0], k_hi)


def stft_compare(x, fs=1.0, window='hann', nperseg=256, noverlap=None,
                 nfft=None, detrend=False, return_onesided=True,
                 boundary='zeros', padded=True, axis=-1, scaling='spectrum'):
    """Assert that the results from the existing `stft()` and `_stft_wrapper()`
    are close to each other.

    For comparing the STFT values an absolute tolerance of the floating point
    resolution was added to circumvent problems with the following tests:
    * For float32 the tolerances are much higher in
      TestSTFT.test_roundtrip_float32().
    * The TestSTFT.test_roundtrip_scaling() has a high relative deviation.
      Interestingly this did not appear in SciPy 1.9.1 but only in the current
      development version.
    """
    kw = dict(x=x, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap,
              nfft=nfft, detrend=detrend, return_onesided=return_onesided,
              boundary=boundary, padded=padded, axis=axis, scaling=scaling)
    f, t, Zxx = stft(**kw)
    f_wrapper, t_wrapper, Zxx_wrapper = _stft_wrapper(**kw)

    e_msg_part = " of `stft_wrapper()` differ from `stft()`."
    assert_allclose(f_wrapper, f, err_msg=f"Frequencies {e_msg_part}")
    assert_allclose(t_wrapper, t, err_msg=f"Time slices {e_msg_part}")

    # Tolerance adapted to account for the deviations noted in the docstring:
    atol = np.finfo(Zxx.dtype).resolution * 2
    assert_allclose(Zxx_wrapper, Zxx, atol=atol,
                    err_msg=f"STFT values {e_msg_part}")
    return f, t, Zxx


def istft_compare(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None,
                  nfft=None, input_onesided=True, boundary=True, time_axis=-1,
                  freq_axis=-2, scaling='spectrum'):
    """Assert that the results from the existing `istft()` and
    `_istft_wrapper()` are close to each other.

    Quirks:
    * If ``boundary=False`` the comparison is skipped, since it does not
      make sense with ShortTimeFFT.istft(). Only used in test
      TestSTFT.test_roundtrip_boundary_extension().
    * If ShortTimeFFT.istft() decides the STFT is not invertible, the
      comparison is skipped, since istft() only emits a warning and does not
      return a correct result. Only used in
      TestSTFT.test_roundtrip_not_nola().
    * For comparing the signals an absolute tolerance of the floating point
      resolution was added to account for the low accuracy of float32 (occurs
      only in TestSTFT.test_roundtrip_float32()).
    """
    kw = dict(Zxx=Zxx, fs=fs, window=window, nperseg=nperseg,
              noverlap=noverlap, nfft=nfft, input_onesided=input_onesided,
              boundary=boundary, time_axis=time_axis, freq_axis=freq_axis,
              scaling=scaling)

    t, x = istft(**kw)
    if not boundary:  # skip test_roundtrip_boundary_extension():
        return t, x  # _istft_wrapper does not implement this case
    try:  # if inversion fails, istft() only emits a warning:
        t_wrapper, x_wrapper, (k_lo, k_hi) = _istft_wrapper(**kw)
    except ValueError as v:  # Do nothing if inversion fails:
        if v.args[0] == "Short-time Fourier Transform not invertible!":
            return t, x
        raise v

    e_msg_part = " of `istft_wrapper()` differ from `istft()`"
    assert_allclose(t, t_wrapper, err_msg=f"Sample times {e_msg_part}")

    # Adapted tolerances to account for resolution loss:
    atol = np.finfo(x.dtype).resolution*2  # instead of default atol = 0
    rtol = 1e-7  # default for np.allclose()

    # Relax atol on 32-Bit platforms a bit to pass CI tests.
    # - Not clear why there are discrepancies (in the FFT maybe?)
    # - Not sure what changed on 'i686' since earlier on those tests passed
    if x.dtype == np.float32 and platform.machine() == 'i686':
        # float32 gets only used by TestSTFT.test_roundtrip_float32() so
        # we are using the tolerances from there to circumvent CI problems
        atol, rtol = 1e-4, 1e-5
    elif platform.machine() in ('aarch64', 'i386', 'i686'):
        atol = max(atol, 1e-12)  # 2e-15 seems too tight for 32-Bit platforms

    assert_allclose(x_wrapper[k_lo:k_hi], x[k_lo:k_hi], atol=atol, rtol=rtol,
                    err_msg=f"Signal values {e_msg_part}")
    return t, x
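

if __name__ == "__main__":
    # Minimal self-check sketch (illustrative addition, not used by the test
    # suite): run both implementations on a random signal.  stft_compare() and
    # istft_compare() raise an AssertionError if the legacy and the
    # ShortTimeFFT-based results disagree; otherwise they return the legacy
    # results unchanged.
    rng = np.random.default_rng(2394795)
    x_demo = rng.standard_normal(1024)
    _, _, Zxx_demo = stft_compare(x_demo, fs=1e3, nperseg=128)
    _, x_rec = istft_compare(Zxx_demo, fs=1e3, nperseg=128)
    print(f"stft/istft agree: Zxx shape {Zxx_demo.shape}, "
          f"reconstructed signal length {len(x_rec)}")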
122
venv/lib/python3.13/site-packages/scipy/signal/tests/mpsig.py
Normal file
@@ -0,0 +1,122 @@
"""
Some signal functions implemented using mpmath.
"""

try:
    import mpmath
except ImportError:
    mpmath = None


def _prod(seq):
    """Returns the product of the elements in the sequence `seq`."""
    p = 1
    for elem in seq:
        p *= elem
    return p


def _relative_degree(z, p):
    """
    Return relative degree of transfer function from zeros and poles.

    This is simply len(p) - len(z), which must be nonnegative.
    A ValueError is raised if len(p) < len(z).
    """
    degree = len(p) - len(z)
    if degree < 0:
        raise ValueError("Improper transfer function. "
                         "Must have at least as many poles as zeros.")
    return degree


def _zpkbilinear(z, p, k, fs):
    """Bilinear transformation to convert a filter from analog to digital."""

    degree = _relative_degree(z, p)

    fs2 = 2*fs

    # Bilinear transform the poles and zeros
    z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z]
    p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p]

    # Any zeros that were at infinity get moved to the Nyquist frequency
    z_z.extend([-1] * degree)

    # Compensate for gain change
    numer = _prod(fs2 - z1 for z1 in z)
    denom = _prod(fs2 - p1 for p1 in p)
    k_z = k * numer / denom

    return z_z, p_z, k_z.real


def _zpklp2lp(z, p, k, wo=1):
    """Transform a lowpass filter to a different cutoff frequency."""

    degree = _relative_degree(z, p)

    # Scale all points radially from origin to shift cutoff frequency
    z_lp = [wo * z1 for z1 in z]
    p_lp = [wo * p1 for p1 in p]

    # Each shifted pole decreases gain by wo, each shifted zero increases it.
    # Cancel out the net change to keep overall gain the same
    k_lp = k * wo**degree

    return z_lp, p_lp, k_lp


def _butter_analog_poles(n):
    """
    Poles of an analog Butterworth lowpass filter.

    This is the same calculation as scipy.signal.buttap(n) or
    scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used,
    and only the poles are returned.
    """
    poles = [-mpmath.exp(1j*mpmath.pi*k/(2*n)) for k in range(-n+1, n, 2)]
    return poles


def butter_lp(n, Wn):
    """
    Lowpass Butterworth digital filter design.

    This computes the same result as scipy.signal.butter(n, Wn, output='zpk'),
    but it uses mpmath, and the results are returned in lists instead of NumPy
    arrays.
    """
    zeros = []
    poles = _butter_analog_poles(n)
    k = 1
    fs = 2
    warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs)
    z, p, k = _zpklp2lp(zeros, poles, k, wo=warped)
    z, p, k = _zpkbilinear(z, p, k, fs=fs)
    return z, p, k


def zpkfreqz(z, p, k, worN=None):
    """
    Frequency response of a filter in zpk format, using mpmath.

    This is the same calculation as scipy.signal.freqz, but the input is in
    zpk format, the calculation is performed using mpmath, and the results are
    returned in lists instead of NumPy arrays.
    """
    if worN is None or isinstance(worN, int):
        N = worN or 512
        ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)]
    else:
        ws = worN

    h = []
    for wk in ws:
        zm1 = mpmath.exp(1j * wk)
        numer = _prod([zm1 - t for t in z])
        denom = _prod([zm1 - t for t in p])
        hk = k * numer / denom
        h.append(hk)
    return ws, h
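

if __name__ == "__main__":
    # Minimal cross-check sketch (illustrative addition; assumes mpmath and
    # SciPy are installed): compare the mpmath-based Butterworth design with
    # scipy.signal.butter via the frequency response, which sidesteps having
    # to match pole ordering between the two implementations.
    if mpmath is None:
        raise SystemExit("mpmath is required for this self-check")
    from scipy import signal as _signal

    n, Wn = 4, 0.25
    z, p, k = butter_lp(n, Wn)
    _, h_mp = zpkfreqz(z, p, k, worN=8)
    _, h_sp = _signal.freqz(*_signal.butter(n, Wn), worN=8)
    max_err = max(abs(complex(a) - b) for a, b in zip(h_mp, h_sp))
    print(f"max |H_mpmath - H_scipy| over 8 frequencies: {max_err:.3e}")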
@@ -0,0 +1,111 @@
import numpy as np

from scipy._lib._array_api import xp_assert_equal
from pytest import raises as assert_raises

from scipy.signal._arraytools import (axis_slice, axis_reverse,
                                      odd_ext, even_ext, const_ext, zero_ext)


class TestArrayTools:

    def test_axis_slice(self):
        a = np.arange(12).reshape(3, 4)

        s = axis_slice(a, start=0, stop=1, axis=0)
        xp_assert_equal(s, a[0:1, :])

        s = axis_slice(a, start=-1, axis=0)
        xp_assert_equal(s, a[-1:, :])

        s = axis_slice(a, start=0, stop=1, axis=1)
        xp_assert_equal(s, a[:, 0:1])

        s = axis_slice(a, start=-1, axis=1)
        xp_assert_equal(s, a[:, -1:])

        s = axis_slice(a, start=0, step=2, axis=0)
        xp_assert_equal(s, a[::2, :])

        s = axis_slice(a, start=0, step=2, axis=1)
        xp_assert_equal(s, a[:, ::2])

    def test_axis_reverse(self):
        a = np.arange(12).reshape(3, 4)

        r = axis_reverse(a, axis=0)
        xp_assert_equal(r, a[::-1, :])

        r = axis_reverse(a, axis=1)
        xp_assert_equal(r, a[:, ::-1])

    def test_odd_ext(self):
        a = np.array([[1, 2, 3, 4, 5],
                      [9, 8, 7, 6, 5]])

        odd = odd_ext(a, 2, axis=1)
        expected = np.array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
                             [11, 10, 9, 8, 7, 6, 5, 4, 3]])
        xp_assert_equal(odd, expected)

        odd = odd_ext(a, 1, axis=0)
        expected = np.array([[-7, -4, -1, 2, 5],
                             [1, 2, 3, 4, 5],
                             [9, 8, 7, 6, 5],
                             [17, 14, 11, 8, 5]])
        xp_assert_equal(odd, expected)

        assert_raises(ValueError, odd_ext, a, 2, axis=0)
        assert_raises(ValueError, odd_ext, a, 5, axis=1)

    def test_even_ext(self):
        a = np.array([[1, 2, 3, 4, 5],
                      [9, 8, 7, 6, 5]])

        even = even_ext(a, 2, axis=1)
        expected = np.array([[3, 2, 1, 2, 3, 4, 5, 4, 3],
                             [7, 8, 9, 8, 7, 6, 5, 6, 7]])
        xp_assert_equal(even, expected)

        even = even_ext(a, 1, axis=0)
        expected = np.array([[9, 8, 7, 6, 5],
                             [1, 2, 3, 4, 5],
                             [9, 8, 7, 6, 5],
                             [1, 2, 3, 4, 5]])
        xp_assert_equal(even, expected)

        assert_raises(ValueError, even_ext, a, 2, axis=0)
        assert_raises(ValueError, even_ext, a, 5, axis=1)

    def test_const_ext(self):
        a = np.array([[1, 2, 3, 4, 5],
                      [9, 8, 7, 6, 5]])

        const = const_ext(a, 2, axis=1)
        expected = np.array([[1, 1, 1, 2, 3, 4, 5, 5, 5],
                             [9, 9, 9, 8, 7, 6, 5, 5, 5]])
        xp_assert_equal(const, expected)

        const = const_ext(a, 1, axis=0)
        expected = np.array([[1, 2, 3, 4, 5],
                             [1, 2, 3, 4, 5],
                             [9, 8, 7, 6, 5],
                             [9, 8, 7, 6, 5]])
        xp_assert_equal(const, expected)

    def test_zero_ext(self):
        a = np.array([[1, 2, 3, 4, 5],
                      [9, 8, 7, 6, 5]])

        zero = zero_ext(a, 2, axis=1)
        expected = np.array([[0, 0, 1, 2, 3, 4, 5, 0, 0],
                             [0, 0, 9, 8, 7, 6, 5, 0, 0]])
        xp_assert_equal(zero, expected)

        zero = zero_ext(a, 1, axis=0)
        expected = np.array([[0, 0, 0, 0, 0],
                             [1, 2, 3, 4, 5],
                             [9, 8, 7, 6, 5],
                             [0, 0, 0, 0, 0]])
        xp_assert_equal(zero, expected)

@@ -0,0 +1,365 @@
# pylint: disable=missing-docstring
|
||||
import math
|
||||
import numpy as np
|
||||
|
||||
from scipy._lib._array_api import (
|
||||
assert_almost_equal, xp_assert_close, xp_assert_equal
|
||||
)
|
||||
import pytest
|
||||
from pytest import raises
|
||||
|
||||
from scipy import signal
|
||||
|
||||
skip_xp_backends = pytest.mark.skip_xp_backends
|
||||
xfail_xp_backends = pytest.mark.xfail_xp_backends
|
||||
|
||||
|
||||
class TestBSplines:
|
||||
"""Test behaviors of B-splines. Some of the values tested against were
|
||||
returned as of SciPy 1.1.0 and are included for regression testing
|
||||
purposes. Others (at integer points) are compared to theoretical
|
||||
expressions (cf. Unser, Aldroubi, Eden, IEEE TSP 1993, Table 1)."""
|
||||
|
||||
@skip_xp_backends(cpu_only=True, exceptions=["cupy"])
|
||||
def test_spline_filter(self, xp):
|
||||
rng = np.random.RandomState(12457)
|
||||
# Test the type-error branch
|
||||
raises(TypeError, signal.spline_filter, xp.asarray([0]), 0)
|
||||
# Test the real branch
|
||||
data_array_real = rng.rand(12, 12)
|
||||
# make the magnitude exceed 1, and make some negative
|
||||
data_array_real = 10*(1-2*data_array_real)
|
||||
data_array_real = xp.asarray(data_array_real)
|
||||
result_array_real = xp.asarray(
|
||||
[[-.463312621, 8.33391222, .697290949, 5.28390836,
|
||||
5.92066474, 6.59452137, 9.84406950, -8.78324188,
|
||||
7.20675750, -8.17222994, -4.38633345, 9.89917069],
|
||||
[2.67755154, 6.24192170, -3.15730578, 9.87658581,
|
||||
-9.96930425, 3.17194115, -4.50919947, 5.75423446,
|
||||
9.65979824, -8.29066885, .971416087, -2.38331897],
|
||||
[-7.08868346, 4.89887705, -1.37062289, 7.70705838,
|
||||
2.51526461, 3.65885497, 5.16786604, -8.77715342e-03,
|
||||
4.10533325, 9.04761993, -.577960351, 9.86382519],
|
||||
[-4.71444301, -1.68038985, 2.84695116, 1.14315938,
|
||||
-3.17127091, 1.91830461, 7.13779687, -5.35737482,
|
||||
-9.66586425, -9.87717456, 9.93160672, 4.71948144],
|
||||
[9.49551194, -1.92958436, 6.25427993, -9.05582911,
|
||||
3.97562282, 7.68232426, -1.04514824, -5.86021443,
|
||||
-8.43007451, 5.47528997, 2.06330736, -8.65968112],
|
||||
[-8.91720100, 8.87065356, 3.76879937, 2.56222894,
|
||||
-.828387146, 8.72288903, 6.42474741, -6.84576083,
|
||||
9.94724115, 6.90665380, -6.61084494, -9.44907391],
|
||||
[9.25196790, -.774032030, 7.05371046, -2.73505725,
|
||||
2.53953305, -1.82889155, 2.95454824, -1.66362046,
|
||||
5.72478916, -3.10287679, 1.54017123, -7.87759020],
|
||||
[-3.98464539, -2.44316992, -1.12708657, 1.01725672,
|
||||
-8.89294671, -5.42145629, -6.16370321, 2.91775492,
|
||||
9.64132208, .702499998, -2.02622392, 1.56308431],
|
||||
[-2.22050773, 7.89951554, 5.98970713, -7.35861835,
|
||||
5.45459283, -7.76427957, 3.67280490, -4.05521315,
|
||||
4.51967507, -3.22738749, -3.65080177, 3.05630155],
|
||||
[-6.21240584, -.296796126, -8.34800163, 9.21564563,
|
||||
-3.61958784, -4.77120006, -3.99454057, 1.05021988e-03,
|
||||
-6.95982829, 6.04380797, 8.43181250, -2.71653339],
|
||||
[1.19638037, 6.99718842e-02, 6.72020394, -2.13963198,
|
||||
3.75309875, -5.70076744, 5.92143551, -7.22150575,
|
||||
-3.77114594, -1.11903194, -5.39151466, 3.06620093],
|
||||
[9.86326886, 1.05134482, -7.75950607, -3.64429655,
|
||||
7.81848957, -9.02270373, 3.73399754, -4.71962549,
|
||||
-7.71144306, 3.78263161, 6.46034818, -4.43444731]], dtype=xp.float64)
|
||||
xp_assert_close(signal.spline_filter(data_array_real, 0),
|
||||
result_array_real)
|
||||
|
||||
@skip_xp_backends(cpu_only=True, exceptions=["cupy"])
|
||||
def test_spline_filter_complex(self, xp):
|
||||
rng = np.random.RandomState(12457)
|
||||
data_array_complex = rng.rand(7, 7) + rng.rand(7, 7)*1j
|
||||
# make the magnitude exceed 1, and make some negative
|
||||
data_array_complex = 10*(1+1j-2*data_array_complex)
|
||||
data_array_complex = xp.asarray(data_array_complex)
|
||||
|
||||
result_array_complex = xp.asarray(
|
||||
[[-4.61489230e-01-1.92994022j, 8.33332443+6.25519943j,
|
||||
6.96300745e-01-9.05576038j, 5.28294849+3.97541356j,
|
||||
5.92165565+7.68240595j, 6.59493160-1.04542804j,
|
||||
9.84503460-5.85946894j],
|
||||
[-8.78262329-8.4295969j, 7.20675516+5.47528982j,
|
||||
-8.17223072+2.06330729j, -4.38633347-8.65968037j,
|
||||
9.89916801-8.91720295j, 2.67755103+8.8706522j,
|
||||
6.24192142+3.76879835j],
|
||||
[-3.15627527+2.56303072j, 9.87658501-0.82838702j,
|
||||
-9.96930313+8.72288895j, 3.17193985+6.42474651j,
|
||||
-4.50919819-6.84576082j, 5.75423431+9.94723988j,
|
||||
9.65979767+6.90665293j],
|
||||
[-8.28993416-6.61064005j, 9.71416473e-01-9.44907284j,
|
||||
-2.38331890+9.25196648j, -7.08868170-0.77403212j,
|
||||
4.89887714+7.05371094j, -1.37062311-2.73505688j,
|
||||
7.70705748+2.5395329j],
|
||||
[2.51528406-1.82964492j, 3.65885472+2.95454836j,
|
||||
5.16786575-1.66362023j, -8.77737999e-03+5.72478867j,
|
||||
4.10533333-3.10287571j, 9.04761887+1.54017115j,
|
||||
-5.77960968e-01-7.87758923j],
|
||||
[9.86398506-3.98528528j, -4.71444130-2.44316983j,
|
||||
-1.68038976-1.12708664j, 2.84695053+1.01725709j,
|
||||
1.14315915-8.89294529j, -3.17127085-5.42145538j,
|
||||
1.91830420-6.16370344j],
|
||||
[7.13875294+2.91851187j, -5.35737514+9.64132309j,
|
||||
-9.66586399+0.70250005j, -9.87717438-2.0262239j,
|
||||
9.93160629+1.5630846j, 4.71948051-2.22050714j,
|
||||
9.49550819+7.8995142j]], dtype=xp.complex128)
|
||||
# FIXME: for complex types, the computations are done in
|
||||
# single precision (reason unclear). When this is changed,
|
||||
# this test needs updating.
|
||||
xp_assert_close(signal.spline_filter(data_array_complex, 0),
|
||||
result_array_complex, rtol=1e-6)
|
||||
|
||||
def test_gauss_spline(self, xp):
|
||||
assert math.isclose(signal.gauss_spline(0, 0), 1.381976597885342)
|
||||
|
||||
xp_assert_close(signal.gauss_spline(xp.asarray([1.]), 1),
|
||||
xp.asarray([0.04865217]), atol=1e-9
|
||||
)
|
||||
|
||||
@skip_xp_backends(np_only=True, reason="deliberate: array-likes are accepted")
|
||||
def test_gauss_spline_list(self, xp):
|
||||
# regression test for gh-12152 (accept array_like)
|
||||
knots = [-1.0, 0.0, -1.0]
|
||||
assert_almost_equal(signal.gauss_spline(knots, 3),
|
||||
np.asarray([0.15418033, 0.6909883, 0.15418033])
|
||||
)
|
||||
|
||||
@skip_xp_backends(cpu_only=True)
|
||||
def test_cspline1d(self, xp):
|
||||
xp_assert_equal(signal.cspline1d(xp.asarray([0])),
|
||||
xp.asarray([0.], dtype=xp.float64))
|
||||
c1d = xp.asarray([1.21037185, 1.86293902, 2.98834059, 4.11660378,
|
||||
4.78893826], dtype=xp.float64)
|
||||
# test lambda != 0
|
||||
xp_assert_close(signal.cspline1d(xp.asarray([1., 2, 3, 4, 5]), 1), c1d)
|
||||
c1d0 = xp.asarray([0.78683946, 2.05333735, 2.99981113, 3.94741812,
|
||||
5.21051638], dtype=xp.float64)
|
||||
xp_assert_close(signal.cspline1d(xp.asarray([1., 2, 3, 4, 5])), c1d0)
|
||||
|
||||
@skip_xp_backends(cpu_only=True)
|
||||
def test_qspline1d(self, xp):
|
||||
xp_assert_equal(signal.qspline1d(xp.asarray([0])),
|
||||
xp.asarray([0.], dtype=xp.float64))
|
||||
# test lambda != 0
|
||||
raises(ValueError, signal.qspline1d, xp.asarray([1., 2, 3, 4, 5]), 1.)
|
||||
raises(ValueError, signal.qspline1d, xp.asarray([1., 2, 3, 4, 5]), -1.)
|
||||
q1d0 = xp.asarray([0.85350007, 2.02441743, 2.99999534, 3.97561055,
|
||||
5.14634135], dtype=xp.float64)
|
||||
xp_assert_close(
|
||||
signal.qspline1d(xp.asarray([1., 2, 3, 4, 5], dtype=xp.float64)), q1d0
|
||||
)
|
||||
|
||||
@skip_xp_backends(cpu_only=True)
|
||||
def test_cspline1d_eval(self, xp):
|
||||
r = signal.cspline1d_eval(xp.asarray([0., 0], dtype=xp.float64),
|
||||
xp.asarray([0.], dtype=xp.float64))
|
||||
xp_assert_close(r, xp.asarray([0.], dtype=xp.float64))
|
||||
|
||||
r = signal.cspline1d_eval(xp.asarray([1., 0, 1], dtype=xp.float64),
|
||||
xp.asarray([], dtype=xp.float64))
|
||||
xp_assert_equal(r, xp.asarray([], dtype=xp.float64))
|
||||
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
|
||||
dx = x[1] - x[0]
|
||||
newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
|
||||
-0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
|
||||
6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
|
||||
12.5]
|
||||
y = xp.asarray([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
|
||||
1.396, 4.094])
|
||||
cj = signal.cspline1d(y)
|
||||
newy = xp.asarray([6.203, 4.41570658, 3.514, 5.16924703, 6.864, 6.04643068,
|
||||
4.21600281, 6.04643068, 6.864, 5.16924703, 3.514,
|
||||
4.41570658, 6.203, 6.80717667, 6.759, 6.98971173, 7.433,
|
||||
7.79560142, 7.874, 7.41525761, 5.879, 3.18686814, 1.396,
|
||||
2.24889482, 4.094, 2.24889482, 1.396, 3.18686814, 5.879,
|
||||
7.41525761, 7.874, 7.79560142, 7.433, 6.98971173, 6.759,
|
||||
6.80717667, 6.203, 4.41570658], dtype=xp.float64)
|
||||
xp_assert_close(
|
||||
signal.cspline1d_eval(cj, xp.asarray(newx), dx=dx, x0=x[0]), newy
|
||||
)
|
||||
|
||||
with pytest.raises(ValueError,
|
||||
match="Spline coefficients 'cj' must not be empty."):
|
||||
signal.cspline1d_eval(xp.asarray([], dtype=xp.float64),
|
||||
xp.asarray([0.0], dtype=xp.float64))
|
||||
|
||||
@skip_xp_backends(cpu_only=True)
|
||||
def test_qspline1d_eval(self, xp):
|
||||
xp_assert_close(signal.qspline1d_eval(xp.asarray([0., 0]), xp.asarray([0.])),
|
||||
xp.asarray([0.])
|
||||
)
|
||||
xp_assert_equal(signal.qspline1d_eval(xp.asarray([1., 0, 1]), xp.asarray([])),
|
||||
xp.asarray([])
|
||||
)
|
||||
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
|
||||
dx = x[1] - x[0]
|
||||
newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
|
||||
-0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
|
||||
6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
|
||||
12.5]
|
||||
y = xp.asarray([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
|
||||
1.396, 4.094])
|
||||
cj = signal.qspline1d(y)
|
||||
newy = xp.asarray([6.203, 4.49418159, 3.514, 5.18390821, 6.864, 5.91436915,
|
||||
4.21600002, 5.91436915, 6.864, 5.18390821, 3.514,
|
||||
4.49418159, 6.203, 6.71900226, 6.759, 7.03980488, 7.433,
|
||||
7.81016848, 7.874, 7.32718426, 5.879, 3.23872593, 1.396,
|
||||
2.34046013, 4.094, 2.34046013, 1.396, 3.23872593, 5.879,
|
||||
7.32718426, 7.874, 7.81016848, 7.433, 7.03980488, 6.759,
|
||||
6.71900226, 6.203, 4.49418159], dtype=xp.float64)
|
||||
r = signal.qspline1d_eval(
|
||||
cj, xp.asarray(newx, dtype=xp.float64), dx=dx, x0=x[0]
|
||||
)
|
||||
xp_assert_close(r, newy)
|
||||
|
||||
with pytest.raises(ValueError,
|
||||
match="Spline coefficients 'cj' must not be empty."):
|
||||
signal.qspline1d_eval(xp.asarray([], dtype=xp.float64),
|
||||
xp.asarray([0.0], dtype=xp.float64))
|
||||
|
||||
|
||||
# input/output dtypes as observed with SciPy 1.9.1; presumably locked in by backwards compatibility
|
||||
sepfir_dtype_map = {np.uint8: np.float32, int: np.float64,
|
||||
np.float32: np.float32, float: float,
|
||||
np.complex64: np.complex64, complex: complex}
|
||||
|
||||
|
||||
@skip_xp_backends(np_only=True)
|
||||
class TestSepfir2d:
|
||||
def test_sepfir2d_invalid_filter(self, xp):
|
||||
filt = xp.asarray([1.0, 2.0, 4.0, 2.0, 1.0])
|
||||
image = np.random.rand(7, 9)
|
||||
image = xp.asarray(image)
|
||||
# No error for odd lengths
|
||||
signal.sepfir2d(image, filt, filt[2:])
|
||||
|
||||
# Row or column filter must be odd
|
||||
with pytest.raises(ValueError, match="odd length"):
|
||||
signal.sepfir2d(image, filt, filt[1:])
|
||||
with pytest.raises(ValueError, match="odd length"):
|
||||
signal.sepfir2d(image, filt[1:], filt)
|
||||
|
||||
# Filters must be 1-dimensional
|
||||
with pytest.raises(ValueError, match="object too deep"):
|
||||
signal.sepfir2d(image, xp.reshape(filt, (1, -1)), filt)
|
||||
with pytest.raises(ValueError, match="object too deep"):
|
||||
signal.sepfir2d(image, filt, xp.reshape(filt, (1, -1)))
|
||||
|
||||
def test_sepfir2d_invalid_image(self, xp):
|
||||
filt = xp.asarray([1.0, 2.0, 4.0, 2.0, 1.0])
|
||||
image = np.random.rand(8, 8)
|
||||
image = xp.asarray(image)
|
||||
|
||||
# Image must be 2 dimensional
|
||||
with pytest.raises(ValueError, match="object too deep"):
|
||||
signal.sepfir2d(xp.reshape(image, (4, 4, 4)), filt, filt)
|
||||
|
||||
with pytest.raises(ValueError, match="object of too small depth"):
|
||||
signal.sepfir2d(image[0, :], filt, filt)
|
||||
|
||||
@pytest.mark.parametrize('dtyp',
|
||||
[np.uint8, int, np.float32, float, np.complex64, complex]
|
||||
)
|
||||
def test_simple(self, dtyp, xp):
|
||||
# test values on a paper-and-pencil example
|
||||
a = np.array([[1, 2, 3, 3, 2, 1],
|
||||
[1, 2, 3, 3, 2, 1],
|
||||
[1, 2, 3, 3, 2, 1],
|
||||
[1, 2, 3, 3, 2, 1]], dtype=dtyp)
|
||||
h1 = [0.5, 1, 0.5]
|
||||
h2 = [1]
|
||||
result = signal.sepfir2d(a, h1, h2)
|
||||
dt = sepfir_dtype_map[dtyp]
|
||||
expected = np.asarray([[2.5, 4. , 5.5, 5.5, 4. , 2.5],
|
||||
[2.5, 4. , 5.5, 5.5, 4. , 2.5],
|
||||
[2.5, 4. , 5.5, 5.5, 4. , 2.5],
|
||||
[2.5, 4. , 5.5, 5.5, 4. , 2.5]], dtype=dt)
|
||||
xp_assert_close(result, expected, atol=1e-16)
|
||||
|
||||
result = signal.sepfir2d(a, h2, h1)
|
||||
expected = np.asarray([[2., 4., 6., 6., 4., 2.],
|
||||
[2., 4., 6., 6., 4., 2.],
|
||||
[2., 4., 6., 6., 4., 2.],
|
||||
[2., 4., 6., 6., 4., 2.]], dtype=dt)
|
||||
xp_assert_close(result, expected, atol=1e-16)
|
||||
|
||||
@skip_xp_backends(np_only=True, reason="TODO: convert this test")
|
||||
@pytest.mark.parametrize('dtyp',
|
||||
[np.uint8, int, np.float32, float, np.complex64, complex]
|
||||
)
|
||||
def test_strided(self, dtyp, xp):
|
||||
a = np.array([[1, 2, 3, 3, 2, 1, 1, 2, 3],
|
||||
[1, 2, 3, 3, 2, 1, 1, 2, 3],
|
||||
[1, 2, 3, 3, 2, 1, 1, 2, 3],
|
||||
[1, 2, 3, 3, 2, 1, 1, 2, 3]])
|
||||
h1, h2 = [0.5, 1, 0.5], [1]
|
||||
result_strided = signal.sepfir2d(a[:, ::2], h1, h2)
|
||||
result_contig = signal.sepfir2d(a[:, ::2].copy(), h1, h2)
|
||||
xp_assert_close(result_strided, result_contig, atol=1e-15)
|
||||
assert result_strided.dtype == result_contig.dtype
|
||||
|
||||
@skip_xp_backends(np_only=True, reason="TODO: convert this test")
|
||||
@pytest.mark.xfail(reason="XXX: filt.size > image.shape: flaky")
|
||||
def test_sepfir2d_strided_2(self, xp):
|
||||
# XXX: this test is flaky: fails on some reruns, with
|
||||
# result[0, 1] and result[1, 1] being ~1e+224.
|
||||
filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0, 3.0, 2.0])
|
||||
image = np.random.rand(4, 4)
|
||||
|
||||
expected = np.asarray([[36.018162, 30.239061, 38.71187 , 43.878183],
|
||||
[38.180999, 35.824583, 43.525247, 43.874945],
|
||||
[43.269533, 40.834018, 46.757772, 44.276423],
|
||||
[49.120928, 39.681844, 43.596067, 45.085854]])
|
||||
xp_assert_close(signal.sepfir2d(image, filt, filt[::3]), expected)
|
||||
|
||||
@skip_xp_backends(np_only=True, reason="TODO: convert this test")
|
||||
@pytest.mark.xfail(reason="XXX: flaky. pointers OOB on some platforms")
|
||||
@pytest.mark.parametrize('dtyp',
|
||||
[np.uint8, int, np.float32, float, np.complex64, complex]
|
||||
)
|
||||
def test_sepfir2d_strided_3(self, dtyp, xp):
|
||||
# NB: 'image' and 'filt' dtypes match here. Otherwise we can run into
|
||||
# unsafe casting errors for many combinations. Historically, dtype handling
|
||||
# in `sepfir2d` is a tad baroque; fixing it is an enhancement.
|
||||
filt = np.array([1, 2, 4, 2, 1, 3, 2], dtype=dtyp)
|
||||
image = np.asarray([[0, 3, 0, 1, 2],
|
||||
[2, 2, 3, 3, 3],
|
||||
[0, 1, 3, 0, 3],
|
||||
[2, 3, 0, 1, 3],
|
||||
[3, 3, 2, 1, 2]], dtype=dtyp)
|
||||
|
||||
expected = [[123., 101., 91., 136., 127.],
|
||||
[133., 125., 126., 152., 160.],
|
||||
[136., 137., 150., 162., 177.],
|
||||
[133., 124., 132., 148., 147.],
|
||||
[173., 158., 152., 164., 141.]]
|
||||
expected = np.asarray(expected)
|
||||
result = signal.sepfir2d(image, filt, filt[::3])
|
||||
xp_assert_close(result, expected, atol=1e-15)
|
||||
assert result.dtype == sepfir_dtype_map[dtyp]
|
||||
|
||||
expected = [[22., 35., 41., 31., 47.],
|
||||
[27., 39., 48., 47., 55.],
|
||||
[33., 42., 49., 53., 59.],
|
||||
[39., 44., 41., 36., 48.],
|
||||
[67., 62., 47., 34., 46.]]
|
||||
expected = np.asarray(expected)
|
||||
result = signal.sepfir2d(image, filt[::3], filt[::3])
|
||||
xp_assert_close(result, expected, atol=1e-15)
|
||||
assert result.dtype == sepfir_dtype_map[dtyp]
|
||||
|
||||
|
||||
def test_cspline2d(xp):
|
||||
rng = np.random.RandomState(181819142)
|
||||
image = rng.rand(71, 73)
|
||||
signal.cspline2d(image, 8.0)
|
||||
|
||||
|
||||
def test_qspline2d(xp):
|
||||
rng = np.random.RandomState(181819143)
|
||||
image = rng.rand(71, 73)
|
||||
signal.qspline2d(image)
|
||||
@@ -0,0 +1,424 @@
import numpy as np
|
||||
from scipy._lib._array_api import (
|
||||
assert_array_almost_equal, assert_almost_equal, xp_assert_close
|
||||
)
|
||||
|
||||
import pytest
|
||||
from scipy.signal import cont2discrete as c2d
|
||||
from scipy.signal import dlsim, ss2tf, ss2zpk, lsim, lti
|
||||
from scipy.signal import tf2ss, impulse, dimpulse, step, dstep
|
||||
|
||||
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
|
||||
# March 29, 2011
|
||||
|
||||
|
||||
class TestC2D:
|
||||
def test_zoh(self):
|
||||
ac = np.eye(2, dtype=np.float64)
|
||||
bc = np.full((2, 1), 0.5, dtype=np.float64)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
ad_truth = 1.648721270700128 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.324360635350064)
|
||||
# c and d in discrete should be equal to their continuous counterparts
|
||||
dt_requested = 0.5
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cc, cd)
|
||||
assert_array_almost_equal(dc, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_foh(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
# True values are verified with Matlab
|
||||
ad_truth = 1.648721270700128 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.420839287058789)
|
||||
cd_truth = cc
|
||||
dd_truth = np.array([[0.260262223725224],
|
||||
[0.297442541400256],
|
||||
[-0.144098411624840]])
|
||||
dt_requested = 0.5
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='foh')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_impulse(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [0.0]])
|
||||
|
||||
# True values are verified with Matlab
|
||||
ad_truth = 1.648721270700128 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.412180317675032)
|
||||
cd_truth = cc
|
||||
dd_truth = np.array([[0.4375], [0.5], [0.3125]])
|
||||
dt_requested = 0.5
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='impulse')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_gbt(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
dt_requested = 0.5
|
||||
alpha = 1.0 / 3.0
|
||||
|
||||
ad_truth = 1.6 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.3)
|
||||
cd_truth = np.array([[0.9, 1.2],
|
||||
[1.2, 1.2],
|
||||
[1.2, 0.3]])
|
||||
dd_truth = np.array([[0.175],
|
||||
[0.2],
|
||||
[-0.205]])
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='gbt', alpha=alpha)
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
|
||||
def test_euler(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
ad_truth = 1.5 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.25)
|
||||
cd_truth = np.array([[0.75, 1.0],
|
||||
[1.0, 1.0],
|
||||
[1.0, 0.25]])
|
||||
dd_truth = dc
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='euler')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_backward_diff(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
ad_truth = 2.0 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.5)
|
||||
cd_truth = np.array([[1.5, 2.0],
|
||||
[2.0, 2.0],
|
||||
[2.0, 0.5]])
|
||||
dd_truth = np.array([[0.875],
|
||||
[1.0],
|
||||
[0.295]])
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='backward_diff')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
|
||||
def test_bilinear(self):
|
||||
ac = np.eye(2)
|
||||
bc = np.full((2, 1), 0.5)
|
||||
cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
|
||||
dc = np.array([[0.0], [0.0], [-0.33]])
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
ad_truth = (5.0 / 3.0) * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 1.0 / 3.0)
|
||||
cd_truth = np.array([[1.0, 4.0 / 3.0],
|
||||
[4.0 / 3.0, 4.0 / 3.0],
|
||||
[4.0 / 3.0, 1.0 / 3.0]])
|
||||
dd_truth = np.array([[0.291666666666667],
|
||||
[1.0 / 3.0],
|
||||
[-0.121666666666667]])
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='bilinear')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
# Same continuous system again, but change sampling rate
|
||||
|
||||
ad_truth = 1.4 * np.eye(2)
|
||||
bd_truth = np.full((2, 1), 0.2)
|
||||
cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]])
|
||||
dd_truth = np.array([[0.175], [0.2], [-0.205]])
|
||||
|
||||
dt_requested = 1.0 / 3.0
|
||||
|
||||
ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested,
|
||||
method='bilinear')
|
||||
|
||||
assert_array_almost_equal(ad_truth, ad)
|
||||
assert_array_almost_equal(bd_truth, bd)
|
||||
assert_array_almost_equal(cd_truth, cd)
|
||||
assert_array_almost_equal(dd_truth, dd)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_transferfunction(self):
|
||||
numc = np.array([0.25, 0.25, 0.5])
|
||||
denc = np.array([0.75, 0.75, 1.0])
|
||||
|
||||
numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]])
|
||||
dend = np.array([1.0, -1.351394049721225, 0.606530659712634])
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
num, den, dt = c2d((numc, denc), dt_requested, method='zoh')
|
||||
|
||||
assert_array_almost_equal(numd, num)
|
||||
assert_array_almost_equal(dend, den)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_zerospolesgain(self):
|
||||
zeros_c = np.array([0.5, -0.5])
|
||||
poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
|
||||
k_c = 1.0
|
||||
|
||||
zeros_d = [1.23371727305860, 0.735356894461267]
|
||||
poles_d = [0.938148335039729 + 0.346233593780536j,
|
||||
0.938148335039729 - 0.346233593780536j]
|
||||
k_d = 1.0
|
||||
|
||||
dt_requested = 0.5
|
||||
|
||||
zeros, poles, k, dt = c2d((zeros_c, poles_c, k_c), dt_requested,
|
||||
method='zoh')
|
||||
|
||||
assert_array_almost_equal(zeros_d, zeros)
|
||||
assert_array_almost_equal(poles_d, poles)
|
||||
assert_almost_equal(k_d, k)
|
||||
assert_almost_equal(dt_requested, dt)
|
||||
|
||||
def test_gbt_with_sio_tf_and_zpk(self):
|
||||
"""Test method='gbt' with alpha=0.25 for tf and zpk cases."""
|
||||
# State space coefficients for the continuous SIO system.
|
||||
A = -1.0
|
||||
B = 1.0
|
||||
C = 1.0
|
||||
D = 0.5
|
||||
|
||||
# The continuous transfer function coefficients.
|
||||
cnum, cden = ss2tf(A, B, C, D)
|
||||
|
||||
# Continuous zpk representation
|
||||
cz, cp, ck = ss2zpk(A, B, C, D)
|
||||
|
||||
h = 1.0
|
||||
alpha = 0.25
|
||||
|
||||
# Explicit formulas, in the scalar case.
|
||||
Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A)
|
||||
Bd = h * B / (1 - alpha * h * A)
|
||||
Cd = C / (1 - alpha * h * A)
|
||||
Dd = D + alpha * C * Bd
|
||||
|
||||
# Convert the explicit solution to tf
|
||||
dnum, dden = ss2tf(Ad, Bd, Cd, Dd)
|
||||
|
||||
# Compute the discrete tf using cont2discrete.
|
||||
c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha)
|
||||
|
||||
xp_assert_close(dnum, c2dnum)
|
||||
xp_assert_close(dden, c2dden)
|
||||
|
||||
# Convert explicit solution to zpk.
|
||||
dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd)
|
||||
|
||||
# Compute the discrete zpk using cont2discrete.
|
||||
c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha)
|
||||
|
||||
xp_assert_close(dz, c2dz)
|
||||
xp_assert_close(dp, c2dp)
|
||||
xp_assert_close(dk, c2dk)
|
||||
|
||||
def test_discrete_approx(self):
|
||||
"""
|
||||
Test that the solution to the discrete approximation of a continuous
|
||||
system actually approximates the solution to the continuous system.
|
||||
This is an indirect test of the correctness of the implementation
|
||||
of cont2discrete.
|
||||
"""
|
||||
|
||||
def u(t):
|
||||
return np.sin(2.5 * t)
|
||||
|
||||
a = np.array([[-0.01]])
|
||||
b = np.array([[1.0]])
|
||||
c = np.array([[1.0]])
|
||||
d = np.array([[0.2]])
|
||||
x0 = 1.0
|
||||
|
||||
t = np.linspace(0, 10.0, 101)
|
||||
dt = t[1] - t[0]
|
||||
u1 = u(t)
|
||||
|
||||
# Use lsim to compute the solution to the continuous system.
|
||||
t, yout, xout = lsim((a, b, c, d), T=t, U=u1, X0=x0)
|
||||
|
||||
# Convert the continuous system to a discrete approximation.
|
||||
dsys = c2d((a, b, c, d), dt, method='bilinear')
|
||||
|
||||
# Use dlsim with the pairwise averaged input to compute the output
|
||||
# of the discrete system.
|
||||
u2 = 0.5 * (u1[:-1] + u1[1:])
|
||||
t2 = t[:-1]
|
||||
td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0)
|
||||
|
||||
# ymid is the average of consecutive terms of the "exact" output
|
||||
# computed by lsim. This is what the discrete approximation
|
||||
# actually approximates.
|
||||
ymid = 0.5 * (yout[:-1] + yout[1:])
|
||||
|
||||
xp_assert_close(yd2.ravel(), ymid, rtol=1e-4)
|
||||
|
||||
def test_simo_tf(self):
|
||||
# See gh-5753
|
||||
tf = ([[1, 0], [1, 1]], [1, 1])
|
||||
num, den, dt = c2d(tf, 0.01)
|
||||
|
||||
assert dt == 0.01 # sanity check
|
||||
xp_assert_close(den, [1, -0.990404983], rtol=1e-3)
|
||||
xp_assert_close(num, [[1, -1], [1, -0.99004983]], rtol=1e-3)
|
||||
|
||||
def test_multioutput(self):
|
||||
ts = 0.01 # time step
|
||||
|
||||
tf = ([[1, -3], [1, 5]], [1, 1])
|
||||
num, den, dt = c2d(tf, ts)
|
||||
|
||||
tf1 = (tf[0][0], tf[1])
|
||||
num1, den1, dt1 = c2d(tf1, ts)
|
||||
|
||||
tf2 = (tf[0][1], tf[1])
|
||||
num2, den2, dt2 = c2d(tf2, ts)
|
||||
|
||||
# Sanity checks
|
||||
assert dt == dt1
|
||||
assert dt == dt2
|
||||
|
||||
# Check that we get the same results
|
||||
xp_assert_close(num, np.vstack((num1, num2)), rtol=1e-13)
|
||||
|
||||
# Single input, so the denominator should
|
||||
# not be multidimensional like the numerator
|
||||
xp_assert_close(den, den1, rtol=1e-13)
|
||||
xp_assert_close(den, den2, rtol=1e-13)
|
||||
|
||||
class TestC2dLti:
|
||||
def test_c2d_ss(self):
|
||||
# StateSpace
|
||||
A = np.array([[-0.3, 0.1], [0.2, -0.7]])
|
||||
B = np.array([[0], [1]])
|
||||
C = np.array([[1, 0]])
|
||||
D = 0
|
||||
dt = 0.05
|
||||
|
||||
A_res = np.array([[0.985136404135682, 0.004876671474795],
|
||||
[0.009753342949590, 0.965629718236502]])
|
||||
B_res = np.array([[0.000122937599964], [0.049135527547844]])
|
||||
|
||||
sys_ssc = lti(A, B, C, D)
|
||||
sys_ssd = sys_ssc.to_discrete(dt=dt)
|
||||
|
||||
xp_assert_close(sys_ssd.A, A_res)
|
||||
xp_assert_close(sys_ssd.B, B_res)
|
||||
xp_assert_close(sys_ssd.C, C)
|
||||
xp_assert_close(sys_ssd.D, np.zeros_like(sys_ssd.D))
|
||||
|
||||
sys_ssd2 = c2d(sys_ssc, dt=dt)
|
||||
|
||||
xp_assert_close(sys_ssd2.A, A_res)
|
||||
xp_assert_close(sys_ssd2.B, B_res)
|
||||
xp_assert_close(sys_ssd2.C, C)
|
||||
xp_assert_close(sys_ssd2.D, np.zeros_like(sys_ssd2.D))
|
||||
|
||||
def test_c2d_tf(self):
|
||||
|
||||
sys = lti([0.5, 0.3], [1.0, 0.4])
|
||||
sys = sys.to_discrete(0.005)
|
||||
|
||||
# Matlab results
|
||||
num_res = np.array([0.5, -0.485149004980066])
|
||||
den_res = np.array([1.0, -0.980198673306755])
|
||||
|
||||
# Somehow a lot of numerical errors
|
||||
xp_assert_close(sys.den, den_res, atol=0.02)
|
||||
xp_assert_close(sys.num, num_res, atol=0.02)
|
||||
|
||||
|
||||
class TestC2dInvariants:
|
||||
# Some test cases for checking the invariances.
|
||||
# Array of triplets: (system, sample time, number of samples)
|
||||
cases = [
|
||||
(tf2ss([1, 1], [1, 1.5, 1]), 0.25, 10),
|
||||
(tf2ss([1, 2], [1, 1.5, 3, 1]), 0.5, 10),
|
||||
(tf2ss(0.1, [1, 1, 2, 1]), 0.5, 10),
|
||||
]
|
||||
|
||||
# Check that systems discretized with the impulse-invariant
|
||||
# method really hold the invariant
|
||||
@pytest.mark.parametrize("sys,sample_time,samples_number", cases)
|
||||
def test_impulse_invariant(self, sys, sample_time, samples_number):
|
||||
time = np.arange(samples_number) * sample_time
|
||||
_, yout_cont = impulse(sys, T=time)
|
||||
_, yout_disc = dimpulse(c2d(sys, sample_time, method='impulse'),
|
||||
n=len(time))
|
||||
xp_assert_close(sample_time * yout_cont.ravel(), yout_disc[0].ravel())
|
||||
|
||||
# Step invariant should hold for ZOH discretized systems
|
||||
@pytest.mark.parametrize("sys,sample_time,samples_number", cases)
|
||||
def test_step_invariant(self, sys, sample_time, samples_number):
|
||||
time = np.arange(samples_number) * sample_time
|
||||
_, yout_cont = step(sys, T=time)
|
||||
_, yout_disc = dstep(c2d(sys, sample_time, method='zoh'), n=len(time))
|
||||
xp_assert_close(yout_cont.ravel(), yout_disc[0].ravel())
|
||||
|
||||
# Linear invariant should hold for FOH discretized systems
|
||||
@pytest.mark.parametrize("sys,sample_time,samples_number", cases)
|
||||
def test_linear_invariant(self, sys, sample_time, samples_number):
|
||||
time = np.arange(samples_number) * sample_time
|
||||
_, yout_cont, _ = lsim(sys, T=time, U=time)
|
||||
_, yout_disc, _ = dlsim(c2d(sys, sample_time, method='foh'), u=time)
|
||||
xp_assert_close(yout_cont.ravel(), yout_disc.ravel())
|
||||
221
venv/lib/python3.13/site-packages/scipy/signal/tests/test_czt.py
Normal file
@@ -0,0 +1,221 @@
# This program is public domain
|
||||
# Authors: Paul Kienzle, Nadav Horesh
|
||||
'''
|
||||
A unit test module for czt.py
|
||||
'''
|
||||
import pytest
|
||||
from scipy._lib._array_api import xp_assert_close
|
||||
from scipy.fft import fft
|
||||
from scipy.signal import (czt, zoom_fft, czt_points, CZT, ZoomFFT)
|
||||
import numpy as np
|
||||
|
||||
|
||||
def check_czt(x):
|
||||
# Check that czt is the equivalent of normal fft
|
||||
y = fft(x)
|
||||
y1 = czt(x)
|
||||
xp_assert_close(y1, y, rtol=1e-13)
|
||||
|
||||
# Check that interpolated czt is the equivalent of normal fft
|
||||
y = fft(x, 100*len(x))
|
||||
y1 = czt(x, 100*len(x))
|
||||
xp_assert_close(y1, y, rtol=1e-12)
|
||||
|
||||
|
||||
def check_zoom_fft(x):
|
||||
# Check that zoom_fft is the equivalent of normal fft
|
||||
y = fft(x)
|
||||
y1 = zoom_fft(x, [0, 2-2./len(y)], endpoint=True)
|
||||
xp_assert_close(y1, y, rtol=1e-11, atol=1e-14)
|
||||
y1 = zoom_fft(x, [0, 2])
|
||||
xp_assert_close(y1, y, rtol=1e-11, atol=1e-14)
|
||||
|
||||
# Test fn scalar
|
||||
y1 = zoom_fft(x, 2-2./len(y), endpoint=True)
|
||||
xp_assert_close(y1, y, rtol=1e-11, atol=1e-14)
|
||||
y1 = zoom_fft(x, 2)
|
||||
xp_assert_close(y1, y, rtol=1e-11, atol=1e-14)
|
||||
|
||||
# Check that zoom_fft with oversampling is equivalent to zero padding
|
||||
over = 10
|
||||
yover = fft(x, over*len(x))
|
||||
y2 = zoom_fft(x, [0, 2-2./len(yover)], m=len(yover), endpoint=True)
|
||||
xp_assert_close(y2, yover, rtol=1e-12, atol=1e-10)
|
||||
y2 = zoom_fft(x, [0, 2], m=len(yover))
|
||||
xp_assert_close(y2, yover, rtol=1e-12, atol=1e-10)
|
||||
|
||||
# Check that zoom_fft works on a subrange
|
||||
w = np.linspace(0, 2-2./len(x), len(x))
|
||||
f1, f2 = w[3], w[6]
|
||||
y3 = zoom_fft(x, [f1, f2], m=3*over+1, endpoint=True)
|
||||
idx3 = slice(3*over, 6*over+1)
|
||||
xp_assert_close(y3, yover[idx3], rtol=1e-13)
|
||||
|
||||
|
||||
def test_1D():
|
||||
# Test of 1D version of the transforms
|
||||
|
||||
rng = np.random.RandomState(0) # Deterministic randomness
|
||||
|
||||
# Random signals
|
||||
lengths = rng.randint(8, 200, 20)
|
||||
np.append(lengths, 1)
|
||||
for length in lengths:
|
||||
x = rng.random(length)
|
||||
check_zoom_fft(x)
|
||||
check_czt(x)
|
||||
|
||||
# Gauss
|
||||
t = np.linspace(-2, 2, 128)
|
||||
x = np.exp(-t**2/0.01)
|
||||
check_zoom_fft(x)
|
||||
|
||||
# Linear
|
||||
x = [1, 2, 3, 4, 5, 6, 7]
|
||||
check_zoom_fft(x)
|
||||
|
||||
# Check near powers of two
|
||||
check_zoom_fft(range(126-31))
|
||||
check_zoom_fft(range(127-31))
|
||||
check_zoom_fft(range(128-31))
|
||||
check_zoom_fft(range(129-31))
|
||||
check_zoom_fft(range(130-31))
|
||||
|
||||
# Check transform on n-D array input
|
||||
x = np.reshape(np.arange(3*2*28), (3, 2, 28))
|
||||
y1 = zoom_fft(x, [0, 2-2./28])
|
||||
y2 = zoom_fft(x[2, 0, :], [0, 2-2./28])
|
||||
xp_assert_close(y1[2, 0], y2, rtol=1e-13, atol=1e-12)
|
||||
|
||||
y1 = zoom_fft(x, [0, 2], endpoint=False)
|
||||
y2 = zoom_fft(x[2, 0, :], [0, 2], endpoint=False)
|
||||
xp_assert_close(y1[2, 0], y2, rtol=1e-13, atol=1e-12)
|
||||
|
||||
# Random (not a test condition)
|
||||
x = rng.rand(101)
|
||||
check_zoom_fft(x)
|
||||
|
||||
# Spikes
|
||||
t = np.linspace(0, 1, 128)
|
||||
x = np.sin(2*np.pi*t*5)+np.sin(2*np.pi*t*13)
|
||||
check_zoom_fft(x)
|
||||
|
||||
# Sines
|
||||
x = np.zeros(100, dtype=complex)
|
||||
x[[1, 5, 21]] = 1
|
||||
check_zoom_fft(x)
|
||||
|
||||
# Sines plus complex component
|
||||
x += 1j*np.linspace(0, 0.5, x.shape[0])
|
||||
check_zoom_fft(x)
|
||||
|
||||
|
||||
def test_large_prime_lengths():
|
||||
rng = np.random.RandomState(0) # Deterministic randomness
|
||||
for N in (101, 1009, 10007):
|
||||
x = rng.rand(N)
|
||||
y = fft(x)
|
||||
y1 = czt(x)
|
||||
xp_assert_close(y, y1, rtol=1e-12)
|
||||
|
||||
|
||||
@pytest.mark.slow
|
||||
def test_czt_vs_fft():
|
||||
rng = np.random.RandomState(123) # Deterministic randomness
|
||||
random_lengths = rng.exponential(100000, size=10).astype('int')
|
||||
for n in random_lengths:
|
||||
a = rng.randn(n)
|
||||
xp_assert_close(czt(a), fft(a), rtol=1e-11)
|
||||
|
||||
|
||||
def test_empty_input():
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
czt([])
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
zoom_fft([], 0.5)
|
||||
|
||||
|
||||
def test_0_rank_input():
|
||||
with pytest.raises(IndexError, match='tuple index out of range'):
|
||||
czt(5)
|
||||
with pytest.raises(IndexError, match='tuple index out of range'):
|
||||
zoom_fft(5, 0.5)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('impulse', ([0, 0, 1], [0, 0, 1, 0, 0],
|
||||
np.concatenate((np.array([0, 0, 1]),
|
||||
np.zeros(100)))))
|
||||
@pytest.mark.parametrize('m', (1, 3, 5, 8, 101, 1021))
|
||||
@pytest.mark.parametrize('a', (1, 2, 0.5, 1.1))
|
||||
# Step that tests away from the unit circle, but not so far it explodes from
|
||||
# numerical error
|
||||
@pytest.mark.parametrize('w', (None, 0.98534 + 0.17055j))
|
||||
def test_czt_math(impulse, m, w, a):
|
||||
# z-transform of an impulse is 1 everywhere
|
||||
xp_assert_close(czt(impulse[2:], m=m, w=w, a=a),
|
||||
np.ones(m, dtype=np.complex128), rtol=1e-10)
|
||||
|
||||
# z-transform of a delayed impulse is z**-1
|
||||
xp_assert_close(czt(impulse[1:], m=m, w=w, a=a),
|
||||
czt_points(m=m, w=w, a=a)**-1, rtol=1e-10)
|
||||
|
||||
# z-transform of a 2-delayed impulse is z**-2
|
||||
xp_assert_close(czt(impulse, m=m, w=w, a=a),
|
||||
czt_points(m=m, w=w, a=a)**-2, rtol=1e-10)
|
||||
|
||||
|
||||
def test_int_args():
|
||||
# Integer argument `a` was producing all 0s
|
||||
xp_assert_close(abs(czt([0, 1], m=10, a=2)), 0.5*np.ones(10), rtol=1e-15)
|
||||
xp_assert_close(czt_points(11, w=2),
|
||||
1/(2**np.arange(11, dtype=np.complex128)), rtol=1e-30)
|
||||
|
||||
|
||||
def test_czt_points():
|
||||
for N in (1, 2, 3, 8, 11, 100, 101, 10007):
|
||||
xp_assert_close(czt_points(N), np.exp(2j*np.pi*np.arange(N)/N),
|
||||
rtol=1e-30)
|
||||
|
||||
xp_assert_close(czt_points(7, w=1), np.ones(7, dtype=np.complex128), rtol=1e-30)
|
||||
xp_assert_close(czt_points(11, w=2.),
|
||||
1/(2**np.arange(11, dtype=np.complex128)), rtol=1e-30)
|
||||
|
||||
func = CZT(12, m=11, w=2., a=1)
|
||||
xp_assert_close(func.points(), 1/(2**np.arange(11)), rtol=1e-30)
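
# Illustrative sketch (not part of the original test suite): czt(x, m, w, a)
# evaluates the z-transform sum(x[n] * z**-n) at the m points z = a * w**-k
# returned by czt_points(), which is the relationship the impulse tests above
# exercise.  The helper name is illustrative only.
def _example_czt_matches_direct_evaluation():
    import numpy as np
    from scipy.signal import czt, czt_points

    signal = np.array([1.0, 2.0, 3.0])
    points = czt_points(m=5, w=0.9 + 0.1j, a=1.0)
    direct = np.array([np.sum(signal * p ** -np.arange(3)) for p in points])
    np.testing.assert_allclose(czt(signal, m=5, w=0.9 + 0.1j, a=1.0), direct,
                               rtol=1e-9)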
|
||||
|
||||
|
||||
@pytest.mark.parametrize('cls, args', [(CZT, (100,)), (ZoomFFT, (100, 0.2))])
|
||||
def test_CZT_size_mismatch(cls, args):
|
||||
# Data size doesn't match function's expected size
|
||||
myfunc = cls(*args)
|
||||
with pytest.raises(ValueError, match='CZT defined for'):
|
||||
myfunc(np.arange(5))
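
# Illustrative sketch (not part of the original test suite): CZT and ZoomFFT
# are callable factories that pre-compute the chirp for one fixed input
# length, which is why the size-mismatch test above rejects a length-5 input.
# The helper name is illustrative only.
def _example_reusable_zoom_fft():
    import numpy as np
    from scipy.signal import ZoomFFT

    transform = ZoomFFT(128, [0.1, 0.3], m=64)   # fixed n=128, band 0.1..0.3
    x = np.cos(2 * np.pi * 0.2 * np.arange(128))
    X = transform(x)                             # reusable for length-128 inputs
    assert X.shape == (64,)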
|
||||
|
||||
|
||||
def test_invalid_range():
|
||||
with pytest.raises(ValueError, match='2-length sequence'):
|
||||
ZoomFFT(100, [1, 2, 3])
|
||||
|
||||
|
||||
@pytest.mark.parametrize('m', [0, -11, 5.5, 4.0])
|
||||
def test_czt_points_errors(m):
|
||||
# Invalid number of points
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
czt_points(m)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('size', [0, -5, 3.5, 4.0])
|
||||
def test_nonsense_size(size):
|
||||
# Numpy and Scipy fft() give ValueError for 0 output size, so we do, too
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
CZT(size, 3)
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
ZoomFFT(size, 0.2, 3)
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
CZT(3, size)
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
ZoomFFT(3, 0.2, size)
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
czt([1, 2, 3], size)
|
||||
with pytest.raises(ValueError, match='Invalid number of CZT'):
|
||||
zoom_fft([1, 2, 3], 0.2, size)
|
||||
|
|
@@ -0,0 +1,599 @@
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
|
||||
# April 4, 2011
|
||||
|
||||
import numpy as np
|
||||
from numpy.testing import suppress_warnings
|
||||
from pytest import raises as assert_raises
|
||||
from scipy._lib._array_api import (
|
||||
assert_array_almost_equal, assert_almost_equal, xp_assert_close, xp_assert_equal,
|
||||
)
|
||||
|
||||
from scipy.signal import (dlsim, dstep, dimpulse, tf2zpk, lti, dlti,
|
||||
StateSpace, TransferFunction, ZerosPolesGain,
|
||||
dfreqresp, dbode, BadCoefficients)
|
||||
|
||||
|
||||
class TestDLTI:
|
||||
|
||||
def test_dlsim(self):
|
||||
|
||||
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
|
||||
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
|
||||
c = np.asarray([[0.1, 0.3]])
|
||||
d = np.asarray([[0.0, -0.1, 0.0]])
|
||||
dt = 0.5
|
||||
|
||||
# Create an input matrix with inputs down the columns (3 cols) and its
|
||||
# respective time input vector
|
||||
u = np.hstack((np.linspace(0, 4.0, num=5)[:, np.newaxis],
|
||||
np.full((5, 1), 0.01),
|
||||
np.full((5, 1), -0.002)))
|
||||
t_in = np.linspace(0, 2.0, num=5)
|
||||
|
||||
# Define the known result
|
||||
yout_truth = np.array([[-0.001,
|
||||
-0.00073,
|
||||
0.039446,
|
||||
0.0915387,
|
||||
0.13195948]]).T
|
||||
xout_truth = np.asarray([[0, 0],
|
||||
[0.0012, 0.0005],
|
||||
[0.40233, 0.00071],
|
||||
[1.163368, -0.079327],
|
||||
[2.2402985, -0.3035679]])
|
||||
|
||||
tout, yout, xout = dlsim((a, b, c, d, dt), u, t_in)
|
||||
|
||||
assert_array_almost_equal(yout_truth, yout)
|
||||
assert_array_almost_equal(xout_truth, xout)
|
||||
assert_array_almost_equal(t_in, tout)
|
||||
|
||||
# Make sure single-dimension input doesn't raise an error
dlsim((1, 2, 3), 4)
|
||||
|
||||
# Interpolated control - inputs should have different time steps
|
||||
# than the discrete model uses internally
|
||||
u_sparse = u[[0, 4], :]
|
||||
t_sparse = np.asarray([0.0, 2.0])
|
||||
|
||||
tout, yout, xout = dlsim((a, b, c, d, dt), u_sparse, t_sparse)
|
||||
|
||||
assert_array_almost_equal(yout_truth, yout)
|
||||
assert_array_almost_equal(xout_truth, xout)
|
||||
assert len(tout) == len(yout)
|
||||
|
||||
# Transfer functions (assume dt = 0.5)
|
||||
num = np.asarray([1.0, -0.1])
|
||||
den = np.asarray([0.3, 1.0, 0.2])
|
||||
yout_truth = np.array([[0.0,
|
||||
0.0,
|
||||
3.33333333333333,
|
||||
-4.77777777777778,
|
||||
23.0370370370370]]).T
|
||||
|
||||
# Assume use of the first column of the control input built earlier
|
||||
tout, yout = dlsim((num, den, 0.5), u[:, 0], t_in)
|
||||
|
||||
assert_array_almost_equal(yout, yout_truth)
|
||||
assert_array_almost_equal(t_in, tout)
|
||||
|
||||
# Retest the same with a 1-D input vector
|
||||
uflat = np.asarray(u[:, 0])
|
||||
uflat = uflat.reshape((5,))
|
||||
tout, yout = dlsim((num, den, 0.5), uflat, t_in)
|
||||
|
||||
assert_array_almost_equal(yout, yout_truth)
|
||||
assert_array_almost_equal(t_in, tout)
|
||||
|
||||
# zeros-poles-gain representation
|
||||
zd = np.array([0.5, -0.5])
|
||||
pd = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
|
||||
k = 1.0
|
||||
yout_truth = np.array([[0.0, 1.0, 2.0, 2.25, 2.5]]).T
|
||||
|
||||
tout, yout = dlsim((zd, pd, k, 0.5), u[:, 0], t_in)
|
||||
|
||||
assert_array_almost_equal(yout, yout_truth)
|
||||
assert_array_almost_equal(t_in, tout)
|
||||
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dlsim, system, u)
|
||||
|
||||
def test_dstep(self):
|
||||
|
||||
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
|
||||
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
|
||||
c = np.asarray([[0.1, 0.3]])
|
||||
d = np.asarray([[0.0, -0.1, 0.0]])
|
||||
dt = 0.5
|
||||
|
||||
# Because b.shape[1] == 3, dstep should result in a tuple of three
|
||||
# result vectors
|
||||
yout_step_truth = (np.asarray([0.0, 0.04, 0.052, 0.0404, 0.00956,
|
||||
-0.036324, -0.093318, -0.15782348,
|
||||
-0.226628324, -0.2969374948]),
|
||||
np.asarray([-0.1, -0.075, -0.058, -0.04815,
|
||||
-0.04453, -0.0461895, -0.0521812,
|
||||
-0.061588875, -0.073549579,
|
||||
-0.08727047595]),
|
||||
np.asarray([0.0, -0.01, -0.013, -0.0101, -0.00239,
|
||||
0.009081, 0.0233295, 0.03945587,
|
||||
0.056657081, 0.0742343737]))
|
||||
|
||||
tout, yout = dstep((a, b, c, d, dt), n=10)
|
||||
|
||||
assert len(yout) == 3
|
||||
|
||||
for i in range(0, len(yout)):
|
||||
assert yout[i].shape[0] == 10
|
||||
assert_array_almost_equal(yout[i].flatten(), yout_step_truth[i])
|
||||
|
||||
# Check that the other two inputs (tf, zpk) will work as well
|
||||
tfin = ([1.0], [1.0, 1.0], 0.5)
|
||||
yout_tfstep = np.asarray([0.0, 1.0, 0.0])
|
||||
tout, yout = dstep(tfin, n=3)
|
||||
assert len(yout) == 1
|
||||
assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
|
||||
|
||||
zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
|
||||
tout, yout = dstep(zpkin, n=3)
|
||||
assert len(yout) == 1
|
||||
assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
|
||||
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dstep, system)
|
||||
|
||||
def test_dimpulse(self):
|
||||
|
||||
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
|
||||
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
|
||||
c = np.asarray([[0.1, 0.3]])
|
||||
d = np.asarray([[0.0, -0.1, 0.0]])
|
||||
dt = 0.5
|
||||
|
||||
# Because b.shape[1] == 3, dimpulse should result in a tuple of three
|
||||
# result vectors
|
||||
yout_imp_truth = (np.asarray([0.0, 0.04, 0.012, -0.0116, -0.03084,
|
||||
-0.045884, -0.056994, -0.06450548,
|
||||
-0.068804844, -0.0703091708]),
|
||||
np.asarray([-0.1, 0.025, 0.017, 0.00985, 0.00362,
|
||||
-0.0016595, -0.0059917, -0.009407675,
|
||||
-0.011960704, -0.01372089695]),
|
||||
np.asarray([0.0, -0.01, -0.003, 0.0029, 0.00771,
|
||||
0.011471, 0.0142485, 0.01612637,
|
||||
0.017201211, 0.0175772927]))
|
||||
|
||||
tout, yout = dimpulse((a, b, c, d, dt), n=10)
|
||||
|
||||
assert len(yout) == 3
|
||||
|
||||
for i in range(0, len(yout)):
|
||||
assert yout[i].shape[0] == 10
|
||||
assert_array_almost_equal(yout[i].flatten(), yout_imp_truth[i])
|
||||
|
||||
# Check that the other two inputs (tf, zpk) will work as well
|
||||
tfin = ([1.0], [1.0, 1.0], 0.5)
|
||||
yout_tfimpulse = np.asarray([0.0, 1.0, -1.0])
|
||||
tout, yout = dimpulse(tfin, n=3)
|
||||
assert len(yout) == 1
|
||||
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
|
||||
|
||||
zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
|
||||
tout, yout = dimpulse(zpkin, n=3)
|
||||
assert len(yout) == 1
|
||||
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
|
||||
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dimpulse, system)
|
||||
|
||||
def test_dlsim_trivial(self):
|
||||
a = np.array([[0.0]])
|
||||
b = np.array([[0.0]])
|
||||
c = np.array([[0.0]])
|
||||
d = np.array([[0.0]])
|
||||
n = 5
|
||||
u = np.zeros(n).reshape(-1, 1)
|
||||
tout, yout, xout = dlsim((a, b, c, d, 1), u)
|
||||
xp_assert_equal(tout, np.arange(float(n)))
|
||||
xp_assert_equal(yout, np.zeros((n, 1)))
|
||||
xp_assert_equal(xout, np.zeros((n, 1)))
|
||||
|
||||
def test_dlsim_simple1d(self):
|
||||
a = np.array([[0.5]])
|
||||
b = np.array([[0.0]])
|
||||
c = np.array([[1.0]])
|
||||
d = np.array([[0.0]])
|
||||
n = 5
|
||||
u = np.zeros(n).reshape(-1, 1)
|
||||
tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
|
||||
xp_assert_equal(tout, np.arange(float(n)))
|
||||
expected = (0.5 ** np.arange(float(n))).reshape(-1, 1)
|
||||
xp_assert_equal(yout, expected)
|
||||
xp_assert_equal(xout, expected)
|
||||
|
||||
def test_dlsim_simple2d(self):
|
||||
lambda1 = 0.5
|
||||
lambda2 = 0.25
|
||||
a = np.array([[lambda1, 0.0],
|
||||
[0.0, lambda2]])
|
||||
b = np.array([[0.0],
|
||||
[0.0]])
|
||||
c = np.array([[1.0, 0.0],
|
||||
[0.0, 1.0]])
|
||||
d = np.array([[0.0],
|
||||
[0.0]])
|
||||
n = 5
|
||||
u = np.zeros(n).reshape(-1, 1)
|
||||
tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
|
||||
xp_assert_equal(tout, np.arange(float(n)))
|
||||
# The analytical solution:
|
||||
expected = (np.array([lambda1, lambda2]) **
|
||||
np.arange(float(n)).reshape(-1, 1))
|
||||
xp_assert_equal(yout, expected)
|
||||
xp_assert_equal(xout, expected)
|
||||
|
||||
def test_more_step_and_impulse(self):
|
||||
lambda1 = 0.5
|
||||
lambda2 = 0.75
|
||||
a = np.array([[lambda1, 0.0],
|
||||
[0.0, lambda2]])
|
||||
b = np.array([[1.0, 0.0],
|
||||
[0.0, 1.0]])
|
||||
c = np.array([[1.0, 1.0]])
|
||||
d = np.array([[0.0, 0.0]])
|
||||
|
||||
n = 10
|
||||
|
||||
# Check a step response.
|
||||
ts, ys = dstep((a, b, c, d, 1), n=n)
|
||||
|
||||
# Create the exact step response.
|
||||
stp0 = (1.0 / (1 - lambda1)) * (1.0 - lambda1 ** np.arange(n))
|
||||
stp1 = (1.0 / (1 - lambda2)) * (1.0 - lambda2 ** np.arange(n))
|
||||
|
||||
xp_assert_close(ys[0][:, 0], stp0)
|
||||
xp_assert_close(ys[1][:, 0], stp1)
|
||||
|
||||
# Check an impulse response with an initial condition.
|
||||
x0 = np.array([1.0, 1.0])
|
||||
ti, yi = dimpulse((a, b, c, d, 1), n=n, x0=x0)
|
||||
|
||||
# Create the exact impulse response.
|
||||
imp = (np.array([lambda1, lambda2]) **
|
||||
np.arange(-1, n + 1).reshape(-1, 1))
|
||||
imp[0, :] = 0.0
|
||||
# Analytical solution to impulse response
|
||||
y0 = imp[:n, 0] + np.dot(imp[1:n + 1, :], x0)
|
||||
y1 = imp[:n, 1] + np.dot(imp[1:n + 1, :], x0)
|
||||
|
||||
xp_assert_close(yi[0][:, 0], y0)
|
||||
xp_assert_close(yi[1][:, 0], y1)
|
||||
|
||||
# Check that dt=0.1, n=3 gives 3 time values.
|
||||
system = ([1.0], [1.0, -0.5], 0.1)
|
||||
t, (y,) = dstep(system, n=3)
|
||||
xp_assert_close(t, [0, 0.1, 0.2])
|
||||
xp_assert_equal(y.T, [[0, 1.0, 1.5]])
|
||||
t, (y,) = dimpulse(system, n=3)
|
||||
xp_assert_close(t, [0, 0.1, 0.2])
|
||||
xp_assert_equal(y.T, [[0, 1, 0.5]])
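
# Illustrative sketch (not part of the original test suite): dstep() returns
# one response array per system input, each of shape (n, 1), which is why the
# tests above unpack yout and index yout[i].  For the single-input system
# H(z) = 1 / (z - 0.5) the step response is a partial geometric sum.  The
# helper name is illustrative only.
def _example_dstep_single_input():
    import numpy as np
    from scipy.signal import dstep

    system = ([1.0], [1.0, -0.5], 1.0)      # (num, den, dt)
    t, (y,) = dstep(system, n=5)
    np.testing.assert_allclose(y[:, 0], [0.0, 1.0, 1.5, 1.75, 1.875])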
|
||||
|
||||
|
||||
class TestDlti:
|
||||
def test_dlti_instantiation(self):
# Test that dlti can be instantiated.
|
||||
|
||||
dt = 0.05
|
||||
# TransferFunction
|
||||
s = dlti([1], [-1], dt=dt)
|
||||
assert isinstance(s, TransferFunction)
|
||||
assert isinstance(s, dlti)
|
||||
assert not isinstance(s, lti)
|
||||
assert s.dt == dt
|
||||
|
||||
# ZerosPolesGain
|
||||
s = dlti(np.array([]), np.array([-1]), 1, dt=dt)
|
||||
assert isinstance(s, ZerosPolesGain)
|
||||
assert isinstance(s, dlti)
|
||||
assert not isinstance(s, lti)
|
||||
assert s.dt == dt
|
||||
|
||||
# StateSpace
|
||||
s = dlti([1], [-1], 1, 3, dt=dt)
|
||||
assert isinstance(s, StateSpace)
|
||||
assert isinstance(s, dlti)
|
||||
assert not isinstance(s, lti)
|
||||
assert s.dt == dt
|
||||
|
||||
# Wrong number of system arguments raises an error
assert_raises(ValueError, dlti, 1)
assert_raises(ValueError, dlti, 1, 1, 1, 1, 1)
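
# Illustrative sketch (not part of the original test suite): dlti() dispatches
# on the number of positional arguments (2 -> TransferFunction,
# 3 -> ZerosPolesGain, 4 -> StateSpace), which is what the instantiation test
# above checks.  The helper name is illustrative only.
def _example_dlti_dispatch():
    from scipy.signal import dlti, TransferFunction, ZerosPolesGain, StateSpace

    assert isinstance(dlti([1], [1, -0.5], dt=0.1), TransferFunction)
    assert isinstance(dlti([], [0.5], 1.0, dt=0.1), ZerosPolesGain)
    assert isinstance(dlti([[0.5]], [[1.0]], [[1.0]], [[0.0]], dt=0.1),
                      StateSpace)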
|
||||
|
||||
|
||||
class TestStateSpaceDisc:
|
||||
def test_initialization(self):
|
||||
# Check that all initializations work
|
||||
dt = 0.05
|
||||
StateSpace(1, 1, 1, 1, dt=dt)
|
||||
StateSpace([1], [2], [3], [4], dt=dt)
|
||||
StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]),
|
||||
np.array([[1, 0]]), np.array([[0]]), dt=dt)
|
||||
StateSpace(1, 1, 1, 1, dt=True)
|
||||
|
||||
def test_conversion(self):
|
||||
# Check the conversion functions
|
||||
s = StateSpace(1, 2, 3, 4, dt=0.05)
|
||||
assert isinstance(s.to_ss(), StateSpace)
|
||||
assert isinstance(s.to_tf(), TransferFunction)
|
||||
assert isinstance(s.to_zpk(), ZerosPolesGain)
|
||||
|
||||
# Make sure copies work
|
||||
assert StateSpace(s) is not s
|
||||
assert s.to_ss() is not s
|
||||
|
||||
def test_properties(self):
|
||||
# Test setters/getters for cross class properties.
|
||||
# This implicitly tests to_tf() and to_zpk()
|
||||
|
||||
# Getters
|
||||
s = StateSpace(1, 1, 1, 1, dt=0.05)
|
||||
xp_assert_equal(s.poles, [1.])
|
||||
xp_assert_equal(s.zeros, [0.])
|
||||
|
||||
|
||||
class TestTransferFunction:
|
||||
def test_initialization(self):
|
||||
# Check that all initializations work
|
||||
dt = 0.05
|
||||
TransferFunction(1, 1, dt=dt)
|
||||
TransferFunction([1], [2], dt=dt)
|
||||
TransferFunction(np.array([1]), np.array([2]), dt=dt)
|
||||
TransferFunction(1, 1, dt=True)
|
||||
|
||||
def test_conversion(self):
|
||||
# Check the conversion functions
|
||||
s = TransferFunction([1, 0], [1, -1], dt=0.05)
|
||||
assert isinstance(s.to_ss(), StateSpace)
|
||||
assert isinstance(s.to_tf(), TransferFunction)
|
||||
assert isinstance(s.to_zpk(), ZerosPolesGain)
|
||||
|
||||
# Make sure copies work
|
||||
assert TransferFunction(s) is not s
|
||||
assert s.to_tf() is not s
|
||||
|
||||
def test_properties(self):
|
||||
# Test setters/getters for cross class properties.
|
||||
# This implicitly tests to_ss() and to_zpk()
|
||||
|
||||
# Getters
|
||||
s = TransferFunction([1, 0], [1, -1], dt=0.05)
|
||||
xp_assert_equal(s.poles, [1.])
|
||||
xp_assert_equal(s.zeros, [0.])
|
||||
|
||||
|
||||
class TestZerosPolesGain:
|
||||
def test_initialization(self):
|
||||
# Check that all initializations work
|
||||
dt = 0.05
|
||||
ZerosPolesGain(1, 1, 1, dt=dt)
|
||||
ZerosPolesGain([1], [2], 1, dt=dt)
|
||||
ZerosPolesGain(np.array([1]), np.array([2]), 1, dt=dt)
|
||||
ZerosPolesGain(1, 1, 1, dt=True)
|
||||
|
||||
def test_conversion(self):
|
||||
# Check the conversion functions
|
||||
s = ZerosPolesGain(1, 2, 3, dt=0.05)
|
||||
assert isinstance(s.to_ss(), StateSpace)
|
||||
assert isinstance(s.to_tf(), TransferFunction)
|
||||
assert isinstance(s.to_zpk(), ZerosPolesGain)
|
||||
|
||||
# Make sure copies work
|
||||
assert ZerosPolesGain(s) is not s
|
||||
assert s.to_zpk() is not s
|
||||
|
||||
|
||||
class Test_dfreqresp:
|
||||
|
||||
def test_manual(self):
|
||||
# Test dfreqresp() real part calculation (manual sanity check).
|
||||
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
|
||||
system = TransferFunction(1, [1, -0.2], dt=0.1)
|
||||
w = [0.1, 1, 10]
|
||||
w, H = dfreqresp(system, w=w)
|
||||
|
||||
# test real
|
||||
expected_re = [1.2383, 0.4130, -0.7553]
|
||||
assert_almost_equal(H.real, expected_re, decimal=4)
|
||||
|
||||
# test imag
|
||||
expected_im = [-0.1555, -1.0214, 0.3955]
|
||||
assert_almost_equal(H.imag, expected_im, decimal=4)
|
||||
|
||||
def test_auto(self):
|
||||
# Test dfreqresp() real part calculation.
|
||||
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
|
||||
system = TransferFunction(1, [1, -0.2], dt=0.1)
|
||||
w = [0.1, 1, 10, 100]
|
||||
w, H = dfreqresp(system, w=w)
|
||||
jw = np.exp(w * 1j)
|
||||
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
|
||||
|
||||
# test real
|
||||
expected_re = y.real
|
||||
assert_almost_equal(H.real, expected_re)
|
||||
|
||||
# test imag
|
||||
expected_im = y.imag
|
||||
assert_almost_equal(H.imag, expected_im)
|
||||
|
||||
def test_freq_range(self):
# Test that dfreqresp() finds a reasonable frequency range.
# First-order low-pass filter: H(z) = 1 / (z - 0.2)
# Expected range is n equally spaced points in [0, pi).
system = TransferFunction(1, [1, -0.2], dt=0.1)
n = 10
expected_w = np.linspace(0, np.pi, n, endpoint=False)
|
||||
w, H = dfreqresp(system, n=n)
|
||||
assert_almost_equal(w, expected_w)
|
||||
|
||||
def test_pole_one(self):
# Test that dfreqresp() doesn't fail on a system with a pole at z = 1.
# Discrete-time integrator (accumulator): H(z) = 1 / (z - 1)
system = TransferFunction([1], [1, -1], dt=0.1)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(RuntimeWarning, message="divide by zero")
|
||||
sup.filter(RuntimeWarning, message="invalid value encountered")
|
||||
w, H = dfreqresp(system, n=2)
|
||||
assert w[0] == 0. # a fail would give not-a-number
|
||||
|
||||
def test_error(self):
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dfreqresp, system)
|
||||
|
||||
def test_from_state_space(self):
# H(z) = 2 / (z**3 - 0.5 * z**2)

system_TF = dlti([2], [1, -0.5, 0, 0])
|
||||
|
||||
A = np.array([[0.5, 0, 0],
|
||||
[1, 0, 0],
|
||||
[0, 1, 0]])
|
||||
B = np.array([[1, 0, 0]]).T
|
||||
C = np.array([[0, 0, 2]])
|
||||
D = 0
|
||||
|
||||
system_SS = dlti(A, B, C, D)
|
||||
w = 10.0**np.arange(-3,0,.5)
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(BadCoefficients)
|
||||
w1, H1 = dfreqresp(system_TF, w=w)
|
||||
w2, H2 = dfreqresp(system_SS, w=w)
|
||||
|
||||
assert_almost_equal(H1, H2)
|
||||
|
||||
def test_from_zpk(self):
# First-order low-pass filter: H(z) = 0.3 / (z - 0.2)
system_ZPK = dlti([], [0.2], 0.3)
system_TF = dlti(0.3, [1, -0.2])
|
||||
w = [0.1, 1, 10, 100]
|
||||
w1, H1 = dfreqresp(system_ZPK, w=w)
|
||||
w2, H2 = dfreqresp(system_TF, w=w)
|
||||
assert_almost_equal(H1, H2)
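
# Illustrative sketch (not part of the original test suite): dfreqresp()
# evaluates H(z) on the unit circle at z = exp(1j * w), with w in radians per
# sample, which is exactly the reference computation used in test_auto above.
# The helper name is illustrative only.
def _example_dfreqresp_unit_circle():
    import numpy as np
    from scipy.signal import TransferFunction, dfreqresp

    system = TransferFunction([1], [1, -0.2], dt=0.1)
    w_in = np.array([0.0, np.pi / 2])
    _, H = dfreqresp(system, w=w_in)
    z = np.exp(1j * w_in)
    np.testing.assert_allclose(H, 1.0 / (z - 0.2), rtol=1e-6)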
|
||||
|
||||
|
||||
class Test_bode:
|
||||
|
||||
def test_manual(self):
# Test dbode() magnitude calculation (manual sanity check).
# First-order low-pass filter: H(z) = 0.3 / (z - 0.2)
dt = 0.1
system = TransferFunction(0.3, [1, -0.2], dt=dt)
|
||||
w = [0.1, 0.5, 1, np.pi]
|
||||
w2, mag, phase = dbode(system, w=w)
|
||||
|
||||
# Test mag
|
||||
expected_mag = [-8.5329, -8.8396, -9.6162, -12.0412]
|
||||
assert_almost_equal(mag, expected_mag, decimal=4)
|
||||
|
||||
# Test phase
|
||||
expected_phase = [-7.1575, -35.2814, -67.9809, -180.0000]
|
||||
assert_almost_equal(phase, expected_phase, decimal=4)
|
||||
|
||||
# Test frequency
|
||||
xp_assert_equal(np.array(w) / dt, w2)
|
||||
|
||||
def test_auto(self):
# Test dbode() magnitude calculation.
# First-order low-pass filter: H(z) = 0.3 / (z - 0.2)
system = TransferFunction(0.3, [1, -0.2], dt=0.1)
|
||||
w = np.array([0.1, 0.5, 1, np.pi])
|
||||
w2, mag, phase = dbode(system, w=w)
|
||||
jw = np.exp(w * 1j)
|
||||
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
|
||||
|
||||
# Test mag
|
||||
expected_mag = 20.0 * np.log10(abs(y))
|
||||
assert_almost_equal(mag, expected_mag)
|
||||
|
||||
# Test phase
|
||||
expected_phase = np.rad2deg(np.angle(y))
|
||||
assert_almost_equal(phase, expected_phase)
|
||||
|
||||
def test_range(self):
# Test that dbode() finds a reasonable frequency range.
# First-order low-pass filter: H(z) = 0.3 / (z - 0.2)
dt = 0.1
system = TransferFunction(0.3, [1, -0.2], dt=dt)
|
||||
n = 10
|
||||
# Expected range is from 0.01 to 10.
|
||||
expected_w = np.linspace(0, np.pi, n, endpoint=False) / dt
|
||||
w, mag, phase = dbode(system, n=n)
|
||||
assert_almost_equal(w, expected_w)
|
||||
|
||||
def test_pole_one(self):
# Test that dbode() doesn't fail on a system with a pole at z = 1.
# Discrete-time integrator (accumulator): H(z) = 1 / (z - 1)
system = TransferFunction([1], [1, -1], dt=0.1)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(RuntimeWarning, message="divide by zero")
|
||||
sup.filter(RuntimeWarning, message="invalid value encountered")
|
||||
w, mag, phase = dbode(system, n=2)
|
||||
assert w[0] == 0. # a fail would give not-a-number
|
||||
|
||||
def test_imaginary(self):
# dbode() should not fail on a system with pure imaginary poles.
# The test passes if dbode() doesn't raise an exception.
|
||||
system = TransferFunction([1], [1, 0, 100], dt=0.1)
|
||||
dbode(system, n=2)
|
||||
|
||||
def test_error(self):
|
||||
# Raise an error for continuous-time systems
|
||||
system = lti([1], [1, 1])
|
||||
assert_raises(AttributeError, dbode, system)
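
# Illustrative sketch (not part of the original test suite): dbode() reports
# magnitude in dB (20*log10|H|), phase in degrees, and rescales the discrete
# frequencies to rad/s by dividing by dt, as Test_bode above verifies.  The
# helper name is illustrative only.
def _example_dbode_units():
    import numpy as np
    from scipy.signal import TransferFunction, dbode

    dt = 0.1
    system = TransferFunction(0.3, [1, -0.2], dt=dt)
    w = np.array([0.5, 1.0])                 # rad/sample
    w_out, mag, phase = dbode(system, w=w)
    H = 0.3 / (np.exp(1j * w) - 0.2)
    np.testing.assert_allclose(w_out, w / dt)
    np.testing.assert_allclose(mag, 20 * np.log10(np.abs(H)), rtol=1e-6)
    np.testing.assert_allclose(phase, np.rad2deg(np.angle(H)), rtol=1e-6)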
|
||||
|
||||
|
||||
class TestTransferFunctionZConversion:
|
||||
"""Test private conversions between 'z' and 'z**-1' polynomials."""
|
||||
|
||||
def test_full(self):
|
||||
# Numerator and denominator same order
|
||||
num = np.asarray([2.0, 3, 4])
|
||||
den = np.asarray([5.0, 6, 7])
|
||||
num2, den2 = TransferFunction._z_to_zinv(num, den)
|
||||
xp_assert_equal(num, num2)
|
||||
xp_assert_equal(den, den2)
|
||||
|
||||
num2, den2 = TransferFunction._zinv_to_z(num, den)
|
||||
xp_assert_equal(num, num2)
|
||||
xp_assert_equal(den, den2)
|
||||
|
||||
def test_numerator(self):
|
||||
# Numerator lower order than denominator
|
||||
num = np.asarray([2.0, 3])
|
||||
den = np.asarray([50, 6, 7])
|
||||
num2, den2 = TransferFunction._z_to_zinv(num, den)
|
||||
xp_assert_equal([0.0, 2, 3], num2)
|
||||
xp_assert_equal(den, den2)
|
||||
|
||||
num2, den2 = TransferFunction._zinv_to_z(num, den)
|
||||
xp_assert_equal([2.0, 3, 0], num2)
|
||||
xp_assert_equal(den, den2)
|
||||
|
||||
def test_denominator(self):
|
||||
# Numerator higher order than denominator
|
||||
num = np.asarray([2., 3, 4])
|
||||
den = np.asarray([5.0, 6])
|
||||
num2, den2 = TransferFunction._z_to_zinv(num, den)
|
||||
xp_assert_equal(num, num2)
|
||||
xp_assert_equal([0.0, 5, 6], den2)
|
||||
|
||||
num2, den2 = TransferFunction._zinv_to_z(num, den)
|
||||
xp_assert_equal(num, num2)
|
||||
xp_assert_equal([5.0, 6, 0], den2)
|
||||
|
||||
@@ -0,0 +1,851 @@
import math
|
||||
import numpy as np
|
||||
|
||||
from numpy.testing import assert_warns
|
||||
from pytest import raises as assert_raises
|
||||
import pytest
|
||||
|
||||
import scipy._lib.array_api_extra as xpx
|
||||
from scipy._lib._array_api import (
|
||||
xp_assert_close, xp_assert_equal, assert_almost_equal, assert_array_almost_equal,
|
||||
array_namespace, xp_default_dtype
|
||||
)
|
||||
from scipy.fft import fft, fft2
|
||||
from scipy.signal import (kaiser_beta, kaiser_atten, kaiserord,
|
||||
firwin, firwin2, freqz, remez, firls, minimum_phase, convolve2d, firwin_2d
|
||||
)
|
||||
|
||||
skip_xp_backends = pytest.mark.skip_xp_backends
|
||||
xfail_xp_backends = pytest.mark.xfail_xp_backends
|
||||
|
||||
|
||||
def test_kaiser_beta():
|
||||
b = kaiser_beta(58.7)
|
||||
assert_almost_equal(b, 0.1102 * 50.0)
|
||||
b = kaiser_beta(22.0)
|
||||
assert_almost_equal(b, 0.5842 + 0.07886)
|
||||
b = kaiser_beta(21.0)
|
||||
assert b == 0.0
|
||||
b = kaiser_beta(10.0)
|
||||
assert b == 0.0
|
||||
|
||||
|
||||
def test_kaiser_atten():
|
||||
a = kaiser_atten(1, 1.0)
|
||||
assert a == 7.95
|
||||
a = kaiser_atten(2, 1/np.pi)
|
||||
assert a == 2.285 + 7.95
|
||||
|
||||
|
||||
def test_kaiserord():
|
||||
assert_raises(ValueError, kaiserord, 1.0, 1.0)
|
||||
numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi)
|
||||
assert (numtaps, beta) == (2, 0.0)
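
# Illustrative sketch (not part of the original test suite): the usual Kaiser
# design flow chains kaiserord() (ripple in dB plus transition width relative
# to Nyquist) into firwin() with a ('kaiser', beta) window, exactly as the
# TestFirWinMore cases below do.  The specs here are made up for illustration;
# the helper name is illustrative only.
def _example_kaiser_design_flow():
    from scipy.signal import firwin, kaiserord

    ripple_db, rel_width = 65.0, 0.08
    ntaps, beta = kaiserord(ripple_db, rel_width)
    taps = firwin(ntaps, 0.3, window=('kaiser', beta))
    assert len(taps) == ntaps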
|
||||
|
||||
|
||||
class TestFirwin:
|
||||
|
||||
def check_response(self, h, expected_response, tol=.05):
|
||||
xp = array_namespace(h)
|
||||
N = h.shape[0]
|
||||
alpha = 0.5 * (N-1)
|
||||
m = xp.arange(0, N) - alpha # time indices of taps
|
||||
for freq, expected in expected_response:
|
||||
actual = abs(xp.sum(h * xp.exp(-1j * xp.pi * m * freq)))
|
||||
mse = abs(actual - expected)**2
|
||||
assert mse < tol, f'response not as expected, mse={mse:g} > {tol:g}'
|
||||
|
||||
def test_response(self, xp):
|
||||
N = 51
|
||||
f = .5
|
||||
|
||||
# increase length just to try even/odd
|
||||
h = firwin(N, f) # low-pass from 0 to f
|
||||
self.check_response(h, [(.25,1), (.75,0)])
|
||||
|
||||
h = firwin(N+1, f, window='nuttall') # specific window
|
||||
self.check_response(h, [(.25,1), (.75,0)])
|
||||
|
||||
h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass
|
||||
self.check_response(h, [(.25,0), (.75,1)])
|
||||
|
||||
f1, f2, f3, f4 = .2, .4, .6, .8
|
||||
h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter
|
||||
self.check_response(h, [(.1,0), (.3,1), (.5,0)])
|
||||
|
||||
h = firwin(N+4, [f1, f2]) # band-stop filter
|
||||
self.check_response(h, [(.1,1), (.3,0), (.5,1)])
|
||||
|
||||
h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False)
|
||||
self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)])
|
||||
|
||||
h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter
|
||||
self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)])
|
||||
|
||||
h = firwin(N+7, 0.1, width=.03) # low-pass
|
||||
self.check_response(h, [(.05,1), (.75,0)])
|
||||
|
||||
h = firwin(N+8, 0.1, pass_zero=False) # high-pass
|
||||
self.check_response(h, [(.05,0), (.75,1)])
|
||||
|
||||
def mse(self, h, bands):
|
||||
"""Compute mean squared error versus ideal response across frequency
|
||||
band.
|
||||
h -- coefficients
|
||||
bands -- list of (left, right) tuples relative to 1==Nyquist of
|
||||
passbands
|
||||
"""
|
||||
w, H = freqz(h, worN=1024)
|
||||
f = w/np.pi
|
||||
passIndicator = np.zeros(len(w), bool)
|
||||
for left, right in bands:
|
||||
passIndicator |= (f >= left) & (f < right)
|
||||
Hideal = np.where(passIndicator, 1, 0)
|
||||
mse = np.mean(abs(abs(H)-Hideal)**2)
|
||||
return mse
|
||||
|
||||
def test_scaling(self, xp):
|
||||
"""
|
||||
For one lowpass, bandpass, and highpass example filter, this test
|
||||
checks two things:
|
||||
- the mean squared error over the frequency domain of the unscaled
|
||||
filter is smaller than the scaled filter (true for rectangular
|
||||
window)
|
||||
- the response of the scaled filter is exactly unity at the center
|
||||
of the first passband
|
||||
"""
|
||||
N = 11
|
||||
cases = [
|
||||
([.5], True, (0, 1)),
|
||||
([0.2, .6], False, (.4, 1)),
|
||||
([.5], False, (1, 1)),
|
||||
]
|
||||
for cutoff, pass_zero, expected_response in cases:
|
||||
h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones')
|
||||
hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones')
|
||||
if len(cutoff) == 1:
|
||||
if pass_zero:
|
||||
cutoff = [0] + cutoff
|
||||
else:
|
||||
cutoff = cutoff + [1]
|
||||
msg = 'least squares violation'
|
||||
assert self.mse(h, [cutoff]) < self.mse(hs, [cutoff]), msg
|
||||
self.check_response(hs, [expected_response], 1e-12)
|
||||
|
||||
def test_fs_validation(self):
|
||||
with pytest.raises(ValueError, match="Sampling.*single scalar"):
|
||||
firwin(51, .5, fs=np.array([10, 20]))
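
# Illustrative sketch (not part of the original test suite): with a scalar
# cutoff firwin() builds a lowpass, and pass_zero=False flips the band roles,
# which is the behaviour check_response() above probes at sample frequencies.
# The helper name is illustrative only.
def _example_firwin_pass_zero():
    import numpy as np
    from scipy.signal import firwin, freqz

    lp = firwin(51, 0.5)                        # passes DC (pass_zero=True)
    hp = firwin(51, 0.5, pass_zero=False)       # rejects DC
    _, H_lp = freqz(lp, worN=np.array([0.0]))   # evaluate at DC only
    _, H_hp = freqz(hp, worN=np.array([0.0]))
    assert abs(H_lp[0]) > 0.99
    assert abs(H_hp[0]) < 0.01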
|
||||
|
||||
|
||||
class TestFirWinMore:
|
||||
"""Different author, different style, different tests..."""
|
||||
|
||||
def test_lowpass(self, xp):
|
||||
width = 0.04
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
cutoff = xp.asarray(0.5)
|
||||
kwargs = dict(cutoff=cutoff, window=('kaiser', beta), scale=False)
|
||||
taps = firwin(ntaps, **kwargs)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], xp.flip(taps)[:ntaps//2])
|
||||
|
||||
# Check the gain at a few samples where
|
||||
# we know it should be approximately 0 or 1.
|
||||
freq_samples = xp.asarray([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0])
|
||||
freqs, response = freqz(taps, worN=xp.pi*freq_samples)
|
||||
|
||||
assert_array_almost_equal(
|
||||
xp.abs(response),
|
||||
xp.asarray([1.0, 1.0, 1.0, 0.0, 0.0, 0.0]), decimal=5
|
||||
)
|
||||
|
||||
taps_str = firwin(ntaps, pass_zero='lowpass', **kwargs)
|
||||
xp_assert_close(taps, taps_str)
|
||||
|
||||
def test_highpass(self, xp):
|
||||
width = 0.04
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
|
||||
# Ensure that ntaps is odd.
|
||||
ntaps |= 1
|
||||
|
||||
cutoff = xp.asarray(0.5)
|
||||
kwargs = dict(cutoff=cutoff, window=('kaiser', beta), scale=False)
|
||||
taps = firwin(ntaps, pass_zero=False, **kwargs)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], xp.flip(taps)[:ntaps//2])
|
||||
|
||||
# Check the gain at a few samples where
|
||||
# we know it should be approximately 0 or 1.
|
||||
freq_samples = xp.asarray([0.0, 0.25, 0.5 - width/2, 0.5 + width/2, 0.75, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
|
||||
assert_array_almost_equal(xp.abs(response),
|
||||
xp.asarray([0.0, 0.0, 0.0, 1.0, 1.0, 1.0]), decimal=5)
|
||||
|
||||
taps_str = firwin(ntaps, pass_zero='highpass', **kwargs)
|
||||
xp_assert_close(taps, taps_str)
|
||||
|
||||
def test_bandpass(self, xp):
|
||||
width = 0.04
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
kwargs = dict(
|
||||
cutoff=xp.asarray([0.3, 0.7]), window=('kaiser', beta), scale=False
|
||||
)
|
||||
taps = firwin(ntaps, pass_zero=False, **kwargs)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], xp.flip(taps)[:ntaps//2])
|
||||
|
||||
# Check the gain at a few samples where
|
||||
# we know it should be approximately 0 or 1.
|
||||
freq_samples = xp.asarray([0.0, 0.2, 0.3 - width/2, 0.3 + width/2, 0.5,
|
||||
0.7 - width/2, 0.7 + width/2, 0.8, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
|
||||
assert_array_almost_equal(xp.abs(response),
|
||||
xp.asarray([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]), decimal=5)
|
||||
|
||||
taps_str = firwin(ntaps, pass_zero='bandpass', **kwargs)
|
||||
xp_assert_close(taps, taps_str)
|
||||
|
||||
def test_bandstop_multi(self, xp):
|
||||
width = 0.04
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
kwargs = dict(cutoff=xp.asarray([0.2, 0.5, 0.8]), window=('kaiser', beta),
|
||||
scale=False)
|
||||
taps = firwin(ntaps, **kwargs)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], xp.flip(taps)[:ntaps//2])
|
||||
|
||||
# Check the gain at a few samples where
|
||||
# we know it should be approximately 0 or 1.
|
||||
freq_samples = xp.asarray([0.0, 0.1, 0.2 - width/2, 0.2 + width/2, 0.35,
|
||||
0.5 - width/2, 0.5 + width/2, 0.65,
|
||||
0.8 - width/2, 0.8 + width/2, 0.9, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
|
||||
assert_array_almost_equal(
|
||||
xp.abs(response),
|
||||
xp.asarray([1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]),
|
||||
decimal=5
|
||||
)
|
||||
|
||||
taps_str = firwin(ntaps, pass_zero='bandstop', **kwargs)
|
||||
xp_assert_close(taps, taps_str)
|
||||
|
||||
def test_fs_nyq(self, xp):
|
||||
"""Test the fs and nyq keywords."""
|
||||
nyquist = 1000
|
||||
width = 40.0
|
||||
relative_width = width/nyquist
|
||||
ntaps, beta = kaiserord(120, relative_width)
|
||||
taps = firwin(ntaps, cutoff=xp.asarray([300, 700]), window=('kaiser', beta),
|
||||
pass_zero=False, scale=False, fs=2*nyquist)
|
||||
|
||||
# Check the symmetry of taps.
|
||||
assert_array_almost_equal(taps[:ntaps//2], xp.flip(taps)[:ntaps//2])
|
||||
|
||||
# Check the gain at a few samples where
|
||||
# we know it should be approximately 0 or 1.
|
||||
freq_samples = xp.asarray([0.0, 200, 300 - width/2, 300 + width/2, 500,
|
||||
700 - width/2, 700 + width/2, 800, 1000])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist)
|
||||
|
||||
assert_array_almost_equal(xp.abs(response),
|
||||
xp.asarray([0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]), decimal=5)
|
||||
|
||||
def test_array_cutoff(self, xp):
|
||||
taps = firwin(3, xp.asarray([.1, .2]))
|
||||
# smoke test against the value computed by scipy==1.5.2
|
||||
xp_assert_close(
|
||||
taps, xp.asarray([-0.00801395, 1.0160279, -0.00801395]), atol=1e-8
|
||||
)
|
||||
|
||||
def test_bad_cutoff(self):
|
||||
"""Test that invalid cutoff argument raises ValueError."""
|
||||
# cutoff values must be greater than 0 and less than 1.
|
||||
assert_raises(ValueError, firwin, 99, -0.5)
|
||||
assert_raises(ValueError, firwin, 99, 1.5)
|
||||
# Don't allow 0 or 1 in cutoff.
|
||||
assert_raises(ValueError, firwin, 99, [0, 0.5])
|
||||
assert_raises(ValueError, firwin, 99, [0.5, 1])
|
||||
# cutoff values must be strictly increasing.
|
||||
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2])
|
||||
assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5])
|
||||
# Must have at least one cutoff value.
|
||||
assert_raises(ValueError, firwin, 99, [])
|
||||
# 2D array not allowed.
|
||||
assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]])
|
||||
# cutoff values must be less than nyq.
|
||||
assert_raises(ValueError, firwin, 99, 50.0, fs=80)
|
||||
assert_raises(ValueError, firwin, 99, [10, 20, 30], fs=50)
|
||||
|
||||
def test_even_highpass_raises_value_error(self):
|
||||
"""Test that attempt to create a highpass filter with an even number
|
||||
of taps raises a ValueError exception."""
|
||||
assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False)
|
||||
assert_raises(ValueError, firwin, 40, [.25, 0.5])
|
||||
|
||||
def test_bad_pass_zero(self):
|
||||
"""Test degenerate pass_zero cases."""
|
||||
with assert_raises(ValueError, match="^Parameter pass_zero='foo' not in "):
|
||||
firwin(41, 0.5, pass_zero='foo')
|
||||
with assert_raises(ValueError, match="^Parameter pass_zero=1.0 not in "):
|
||||
firwin(41, 0.5, pass_zero=1.)
|
||||
for pass_zero in ('lowpass', 'highpass'):
|
||||
with assert_raises(ValueError, match='cutoff must have one'):
|
||||
firwin(41, [0.5, 0.6], pass_zero=pass_zero)
|
||||
for pass_zero in ('bandpass', 'bandstop'):
|
||||
with assert_raises(ValueError, match='must have at least two'):
|
||||
firwin(41, [0.5], pass_zero=pass_zero)
|
||||
|
||||
def test_fs_validation(self):
|
||||
with pytest.raises(ValueError, match="Sampling.*single scalar"):
|
||||
firwin2(51, .5, 1, fs=np.array([10, 20]))
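
# Illustrative sketch (not part of the original test suite): passing fs lets
# cutoffs be given in Hz; dividing by the Nyquist rate by hand gives the same
# taps, which is the equivalence test_fs_nyq above relies on.  The helper name
# is illustrative only.
def _example_firwin_fs_in_hz():
    import numpy as np
    from scipy.signal import firwin

    fs = 2000.0
    taps_hz = firwin(101, [300.0, 700.0], pass_zero=False, fs=fs)
    taps_rel = firwin(101, [300.0 / (fs / 2), 700.0 / (fs / 2)],
                      pass_zero=False)
    np.testing.assert_allclose(taps_hz, taps_rel)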
|
||||
|
||||
|
||||
@skip_xp_backends(cpu_only=True, reason="firwin2 uses np.interp")
|
||||
class TestFirwin2:
|
||||
|
||||
def test_invalid_args(self):
|
||||
# `freq` and `gain` have different lengths.
|
||||
with assert_raises(ValueError, match='must be of same length'):
|
||||
firwin2(50, [0, 0.5, 1], [0.0, 1.0])
|
||||
# `nfreqs` is less than `ntaps`.
|
||||
with assert_raises(ValueError, match='ntaps must be less than nfreqs'):
|
||||
firwin2(50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33)
|
||||
# Decreasing value in `freq`
|
||||
with assert_raises(ValueError, match='must be nondecreasing'):
|
||||
firwin2(50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0])
|
||||
# Value in `freq` repeated more than once.
|
||||
with assert_raises(ValueError, match='must not occur more than twice'):
|
||||
firwin2(50, [0, .1, .1, .1, 1.0], [0.0, 0.5, 0.75, 1.0, 1.0])
|
||||
# `freq` does not start at 0.0.
|
||||
with assert_raises(ValueError, match='start with 0'):
|
||||
firwin2(50, [0.5, 1.0], [0.0, 1.0])
|
||||
# `freq` does not end at fs/2.
|
||||
with assert_raises(ValueError, match='end with fs/2'):
|
||||
firwin2(50, [0.0, 0.5], [0.0, 1.0])
|
||||
# Value 0 is repeated in `freq`
|
||||
with assert_raises(ValueError, match='0 must not be repeated'):
|
||||
firwin2(50, [0.0, 0.0, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])
|
||||
# Value fs/2 is repeated in `freq`
|
||||
with assert_raises(ValueError, match='fs/2 must not be repeated'):
|
||||
firwin2(50, [0.0, 0.5, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0])
|
||||
# Value in `freq` that is too close to a repeated number
|
||||
with assert_raises(ValueError, match='cannot contain numbers '
|
||||
'that are too close'):
|
||||
firwin2(50, [0.0, 0.5 - np.finfo(float).eps * 0.5, 0.5, 0.5, 1.0],
|
||||
[1.0, 1.0, 1.0, 0.0, 0.0])
|
||||
|
||||
# Type II filter, but the gain at nyquist frequency is not zero.
|
||||
with assert_raises(ValueError, match='Type II filter'):
|
||||
firwin2(16, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0])
|
||||
|
||||
# Type III filter, but the gains at nyquist and zero rate are not zero.
|
||||
with assert_raises(ValueError, match='Type III filter'):
|
||||
firwin2(17, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0], antisymmetric=True)
|
||||
with assert_raises(ValueError, match='Type III filter'):
|
||||
firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True)
|
||||
with assert_raises(ValueError, match='Type III filter'):
|
||||
firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 1.0], antisymmetric=True)
|
||||
|
||||
# Type IV filter, but the gain at zero rate is not zero.
|
||||
with assert_raises(ValueError, match='Type IV filter'):
|
||||
firwin2(16, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True)
|
||||
|
||||
def test01(self, xp):
|
||||
width = 0.04
|
||||
beta = 12.0
|
||||
ntaps = 400
|
||||
# Filter is 1 from w=0 to w=0.5, then decreases linearly from 1 to 0 as w
|
||||
# increases from w=0.5 to w=1 (w=1 is the Nyquist frequency).
|
||||
freq = xp.asarray([0.0, 0.5, 1.0])
|
||||
gain = xp.asarray([1.0, 1.0, 0.0])
|
||||
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
|
||||
freq_samples = xp.asarray([0.0, 0.25, 0.5 - width/2, 0.5 + width/2,
|
||||
0.75, 1.0 - width/2])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
freqs, response = xp.asarray(freqs), xp.asarray(response)
|
||||
assert_array_almost_equal(
|
||||
xp.abs(response),
|
||||
xp.asarray([1.0, 1.0, 1.0, 1.0 - width, 0.5, width]), decimal=5
|
||||
)
|
||||
|
||||
@skip_xp_backends("jax.numpy", reason="immutable arrays")
|
||||
def test02(self, xp):
|
||||
width = 0.04
|
||||
beta = 12.0
|
||||
# ntaps must be odd for positive gain at Nyquist.
|
||||
ntaps = 401
|
||||
# An ideal highpass filter.
|
||||
freq = xp.asarray([0.0, 0.5, 0.5, 1.0])
|
||||
gain = xp.asarray([0.0, 0.0, 1.0, 1.0])
|
||||
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
|
||||
freq_samples = np.array([0.0, 0.25, 0.5 - width, 0.5 + width, 0.75, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
freqs, response = xp.asarray(freqs), xp.asarray(response)
|
||||
assert_array_almost_equal(
|
||||
xp.abs(response),
|
||||
xp.asarray([0.0, 0.0, 0.0, 1.0, 1.0, 1.0]), decimal=5
|
||||
)
|
||||
|
||||
@skip_xp_backends("jax.numpy", reason="immutable arrays")
|
||||
def test03(self, xp):
|
||||
width = 0.02
|
||||
ntaps, beta = kaiserord(120, width)
|
||||
# ntaps must be odd for positive gain at Nyquist.
|
||||
ntaps = int(ntaps) | 1
|
||||
freq = xp.asarray([0.0, 0.4, 0.4, 0.5, 0.5, 1.0])
|
||||
gain = xp.asarray([1.0, 1.0, 0.0, 0.0, 1.0, 1.0])
|
||||
taps = firwin2(ntaps, freq, gain, window=('kaiser', beta))
|
||||
freq_samples = np.array([0.0, 0.4 - width, 0.4 + width, 0.45,
|
||||
0.5 - width, 0.5 + width, 0.75, 1.0])
|
||||
freqs, response = freqz(taps, worN=np.pi*freq_samples)
|
||||
freqs, response = xp.asarray(freqs), xp.asarray(response)
|
||||
assert_array_almost_equal(
|
||||
xp.abs(response),
|
||||
xp.asarray([1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]), decimal=5
|
||||
)
|
||||
|
||||
@skip_xp_backends("jax.numpy", reason="immutable arrays")
|
||||
def test04(self, xp):
|
||||
"""Test firwin2 when window=None."""
|
||||
ntaps = 5
|
||||
# Ideal lowpass: gain is 1 on [0,0.5], and 0 on [0.5, 1.0]
|
||||
freq = xp.asarray([0.0, 0.5, 0.5, 1.0])
|
||||
gain = xp.asarray([1.0, 1.0, 0.0, 0.0])
|
||||
|
||||
taps = firwin2(ntaps, freq, gain, window=None, nfreqs=8193)
|
||||
alpha = 0.5 * (ntaps - 1)
|
||||
m = xp.arange(0, ntaps, dtype=freq.dtype) - alpha
|
||||
h = 0.5 * xpx.sinc(0.5 * m)
|
||||
assert_array_almost_equal(h, taps)
|
||||
|
||||
def test05(self, xp):
|
||||
"""Test firwin2 for calculating Type IV filters"""
|
||||
ntaps = 1500
|
||||
|
||||
freq = xp.asarray([0.0, 1.0])
|
||||
gain = xp.asarray([0.0, 1.0])
|
||||
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
|
||||
|
||||
flip = array_namespace(freq).flip
|
||||
dec = {'decimal': 4.5} if xp_default_dtype(xp) == xp.float32 else {}
|
||||
assert_array_almost_equal(taps[: ntaps // 2], flip(-taps[ntaps // 2:]), **dec)
|
||||
|
||||
freqs, response = freqz(np.asarray(taps), worN=2048) # XXX convert freqz
|
||||
assert_array_almost_equal(abs(xp.asarray(response)),
|
||||
xp.asarray(freqs / np.pi), decimal=4)
|
||||
|
||||
@skip_xp_backends("jax.numpy", reason="immutable arrays")
|
||||
def test06(self, xp):
|
||||
"""Test firwin2 for calculating Type III filters"""
|
||||
ntaps = 1501
|
||||
|
||||
freq = xp.asarray([0.0, 0.5, 0.55, 1.0])
|
||||
gain = xp.asarray([0.0, 0.5, 0.0, 0.0])
|
||||
taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True)
|
||||
assert taps[ntaps // 2] == 0.0
|
||||
|
||||
flip = array_namespace(freq).flip
|
||||
dec = {'decimal': 4.5} if xp_default_dtype(xp) == xp.float32 else {}
|
||||
assert_array_almost_equal(taps[: ntaps // 2],
|
||||
flip(-taps[ntaps // 2 + 1:]), **dec
|
||||
)
|
||||
|
||||
freqs, response1 = freqz(np.asarray(taps), worN=2048) # XXX convert freqz
|
||||
response1 = xp.asarray(response1)
|
||||
response2 = xp.asarray(
|
||||
np.interp(np.asarray(freqs) / np.pi, np.asarray(freq), np.asarray(gain))
|
||||
)
|
||||
assert_array_almost_equal(abs(response1), response2, decimal=3)
|
||||
|
||||
def test_fs_nyq(self, xp):
|
||||
taps1 = firwin2(80, xp.asarray([0.0, 0.5, 1.0]), xp.asarray([1.0, 1.0, 0.0]))
|
||||
taps2 = firwin2(80, xp.asarray([0.0, 30.0, 60.0]), xp.asarray([1.0, 1.0, 0.0]),
|
||||
fs=120.0)
|
||||
assert_array_almost_equal(taps1, taps2)
|
||||
|
||||
def test_tuple(self):
|
||||
taps1 = firwin2(150, (0.0, 0.5, 0.5, 1.0), (1.0, 1.0, 0.0, 0.0))
|
||||
taps2 = firwin2(150, [0.0, 0.5, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0])
|
||||
assert_array_almost_equal(taps1, taps2)
|
||||
|
||||
@skip_xp_backends("jax.numpy", reason="immutable arrays")
|
||||
def test_input_modification(self, xp):
|
||||
freq1 = xp.asarray([0.0, 0.5, 0.5, 1.0])
|
||||
freq2 = xp.asarray(freq1)
|
||||
firwin2(80, freq1, xp.asarray([1.0, 1.0, 0.0, 0.0]))
|
||||
xp_assert_equal(freq1, freq2)
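
# Illustrative sketch (not part of the original test suite): firwin2() samples
# the piecewise-linear target defined by the (freq, gain) pairs and windows
# the inverse FFT, so the realized magnitude tracks np.interp() of that
# target, as test03/test06 above verify.  The band edges below are made up
# for illustration; the helper name is illustrative only.
def _example_firwin2_piecewise_target():
    import numpy as np
    from scipy.signal import firwin2, freqz

    freq = [0.0, 0.3, 0.7, 1.0]
    gain = [1.0, 1.0, 0.0, 0.0]
    taps = firwin2(201, freq, gain)
    w, H = freqz(taps, worN=512)
    target = np.interp(w / np.pi, freq, gain)
    assert np.max(np.abs(np.abs(H) - target)) < 0.1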
|
||||
|
||||
|
||||
@skip_xp_backends(cpu_only=True)
|
||||
class TestRemez:
|
||||
|
||||
def test_bad_args(self):
|
||||
assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka')
|
||||
|
||||
def test_hilbert(self):
|
||||
N = 11 # number of taps in the filter
|
||||
a = 0.1 # width of the transition band
|
||||
|
||||
# design a unity-gain Hilbert bandpass filter from a to 0.5-a
h = remez(N, [a, 0.5-a], [1], type='hilbert')
|
||||
|
||||
# make sure the filter has correct # of taps
|
||||
assert len(h) == N, "Number of Taps"
|
||||
|
||||
# make sure it is type III (anti-symmetric tap coefficients)
|
||||
assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1])
|
||||
|
||||
# Since the requested response is symmetric, all even coefficients
|
||||
# should be zero (or in this case really small)
|
||||
assert (abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero"
|
||||
|
||||
# now check the frequency response
|
||||
w, H = freqz(h, 1)
|
||||
f = w/2/np.pi
|
||||
Hmag = abs(H)
|
||||
|
||||
# should have a zero at 0 and pi (in this case close to zero)
|
||||
assert (Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi"
|
||||
|
||||
# check that the pass band is close to unity
|
||||
idx = np.logical_and(f > a, f < 0.5-a)
|
||||
assert (abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity"
|
||||
|
||||
def test_compare(self, xp):
|
||||
# test comparison to MATLAB
|
||||
k = [0.024590270518440, -0.041314581814658, -0.075943803756711,
|
||||
-0.003530911231040, 0.193140296954975, 0.373400753484939,
|
||||
0.373400753484939, 0.193140296954975, -0.003530911231040,
|
||||
-0.075943803756711, -0.041314581814658, 0.024590270518440]
|
||||
h = remez(12, xp.asarray([0, 0.3, 0.5, 1]), xp.asarray([1, 0]), fs=2.)
|
||||
atol_arg = {'atol': 1e-8} if xp_default_dtype(xp) == xp.float32 else {}
|
||||
xp_assert_close(h, xp.asarray(k, dtype=xp.float64), **atol_arg)
|
||||
|
||||
h = [-0.038976016082299, 0.018704846485491, -0.014644062687875,
|
||||
0.002879152556419, 0.016849978528150, -0.043276706138248,
|
||||
0.073641298245579, -0.103908158578635, 0.129770906801075,
|
||||
-0.147163447297124, 0.153302248456347, -0.147163447297124,
|
||||
0.129770906801075, -0.103908158578635, 0.073641298245579,
|
||||
-0.043276706138248, 0.016849978528150, 0.002879152556419,
|
||||
-0.014644062687875, 0.018704846485491, -0.038976016082299]
|
||||
atol_arg = {'atol': 3e-8} if xp_default_dtype(xp) == xp.float32 else {}
|
||||
xp_assert_close(
|
||||
remez(21, xp.asarray([0, 0.8, 0.9, 1]), xp.asarray([0, 1]), fs=2.),
|
||||
xp.asarray(h, dtype=xp.float64), **atol_arg
|
||||
)
|
||||
|
||||
def test_fs_validation(self):
|
||||
with pytest.raises(ValueError, match="Sampling.*single scalar"):
|
||||
remez(11, .1, 1, fs=np.array([10, 20]))
|
||||
|
||||
def test_gh_23266(self, xp):
|
||||
bands = xp.asarray([0.0, 0.2, 0.3, 0.5])
|
||||
desired = xp.asarray([1.0, 0.0])
|
||||
weight = xp.asarray([1.0, 2.0])
|
||||
remez(21, bands, desired, weight=weight)
|
||||
|
||||
|
||||
@skip_xp_backends(cpu_only=True, reason="lstsq")
|
||||
class TestFirls:
|
||||
|
||||
def test_bad_args(self):
|
||||
# even numtaps
|
||||
assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0])
|
||||
# odd bands
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0])
|
||||
# len(bands) != len(desired)
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0])
|
||||
# non-monotonic bands
|
||||
assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0])
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], [0] * 4)
|
||||
assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], [0] * 4)
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], [0] * 4)
|
||||
# negative desired
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2], [-1, 1])
|
||||
# len(weight) != len(pairs)
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], weight=[1, 2])
|
||||
# negative weight
|
||||
assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], weight=[-1])
|
||||
|
||||
@skip_xp_backends("dask.array", reason="dask fancy indexing shape=(nan,)")
|
||||
def test_firls(self, xp):
|
||||
N = 11 # number of taps in the filter
|
||||
a = 0.1 # width of the transition band
|
||||
|
||||
# design a halfband symmetric low-pass filter
|
||||
h = firls(11, xp.asarray([0, a, 0.5 - a, 0.5]), xp.asarray([1, 1, 0, 0]),
|
||||
fs=1.0)
|
||||
|
||||
# make sure the filter has correct # of taps
|
||||
assert h.shape[0] == N
|
||||
|
||||
# make sure it is symmetric
|
||||
midx = (N-1) // 2
|
||||
flip = array_namespace(h).flip
|
||||
assert_array_almost_equal(h[:midx], flip(h[midx+1:])) # h[:-midx-1:-1])
|
||||
|
||||
# make sure the center tap is 0.5
|
||||
assert math.isclose(h[midx], 0.5, abs_tol=1e-8)
|
||||
|
||||
# For halfband symmetric, odd coefficients (except the center)
|
||||
# should be zero (really small)
|
||||
hodd = xp.stack((h[1:midx:2], h[-midx+1::2]))
|
||||
assert_array_almost_equal(hodd, xp.zeros_like(hodd))
|
||||
|
||||
# now check the frequency response
|
||||
w, H = freqz(np.asarray(h), 1)
|
||||
w, H = xp.asarray(w), xp.asarray(H)
|
||||
f = w/2/xp.pi
|
||||
Hmag = xp.abs(H)
|
||||
|
||||
# check that the pass band is close to unity
|
||||
idx = xp.logical_and(f > 0, f < a)
|
||||
assert_array_almost_equal(Hmag[idx], xp.ones_like(Hmag[idx]), decimal=3)
|
||||
|
||||
# check that the stop band is close to zero
|
||||
idx = xp.logical_and(f > 0.5 - a, f < 0.5)
|
||||
assert_array_almost_equal(Hmag[idx], xp.zeros_like(Hmag[idx]), decimal=3)
|
||||
|
||||
def test_compare(self, xp):
|
||||
# compare to OCTAVE output
|
||||
taps = firls(9, xp.asarray([0, 0.5, 0.55, 1]),
|
||||
xp.asarray([1, 1, 0, 0]), weight=xp.asarray([1, 2]))
|
||||
# >> taps = firls(8, [0 0.5 0.55 1], [1 1 0 0], [1, 2]);
|
||||
known_taps = [-6.26930101730182e-04, -1.03354450635036e-01,
|
||||
-9.81576747564301e-03, 3.17271686090449e-01,
|
||||
5.11409425599933e-01, 3.17271686090449e-01,
|
||||
-9.81576747564301e-03, -1.03354450635036e-01,
|
||||
-6.26930101730182e-04]
|
||||
atol_arg = {'atol': 5e-8} if xp_default_dtype(xp) == xp.float32 else {}
|
||||
known_taps = xp.asarray(known_taps, dtype=xp.float64)
|
||||
xp_assert_close(taps, known_taps, **atol_arg)
|
||||
|
||||
# compare to MATLAB output
|
||||
taps = firls(11, xp.asarray([0, 0.5, 0.5, 1]),
|
||||
xp.asarray([1, 1, 0, 0]), weight=xp.asarray([1, 2]))
|
||||
# >> taps = firls(10, [0 0.5 0.5 1], [1 1 0 0], [1, 2]);
|
||||
known_taps = [
|
||||
0.058545300496815, -0.014233383714318, -0.104688258464392,
|
||||
0.012403323025279, 0.317930861136062, 0.488047220029700,
|
||||
0.317930861136062, 0.012403323025279, -0.104688258464392,
|
||||
-0.014233383714318, 0.058545300496815]
|
||||
known_taps = xp.asarray(known_taps, dtype=xp.float64)
|
||||
atol_arg = {'atol': 3e-8} if xp_default_dtype(xp) == xp.float32 else {}
|
||||
xp_assert_close(taps, known_taps, **atol_arg)
|
||||
|
||||
# With linear changes:
|
||||
taps = firls(7, xp.asarray((0, 1, 2, 3, 4, 5)),
|
||||
xp.asarray([1, 0, 0, 1, 1, 0]), fs=20)
|
||||
# >> taps = firls(6, [0, 0.1, 0.2, 0.3, 0.4, 0.5], [1, 0, 0, 1, 1, 0])
|
||||
known_taps = [
|
||||
1.156090832768218, -4.1385894727395849, 7.5288619164321826,
|
||||
-8.5530572592947856, 7.5288619164321826, -4.1385894727395849,
|
||||
1.156090832768218]
|
||||
known_taps = xp.asarray(known_taps, dtype=xp.float64)
|
||||
xp_assert_close(taps, known_taps)
|
||||
|
||||
def test_rank_deficient(self, xp):
|
||||
# solve() runs but warns (only sometimes, so here we don't use match)
|
||||
x = firls(21, xp.asarray([0, 0.1, 0.9, 1]), xp.asarray([1, 1, 0, 0]))
|
||||
w, h = freqz(np.asarray(x), fs=2.)
|
||||
w, h = map(xp.asarray, (w, h)) # XXX convert freqz
|
||||
absh2 = xp.abs(h[:2])
|
||||
xp_assert_close(absh2, xp.ones_like(absh2), atol=1e-5)
|
||||
absh2 = xp.abs(h[-2:])
|
||||
xp_assert_close(absh2, xp.zeros_like(absh2), atol=1e-6, rtol=1e-7)
|
||||
# switch to pinvh (tolerances could be higher with longer
|
||||
# filters, but using shorter ones is faster computationally and
|
||||
# the idea is the same)
|
||||
x = firls(101, xp.asarray([0, 0.01, 0.99, 1]), xp.asarray([1, 1, 0, 0]))
|
||||
w, h = freqz(np.asarray(x), fs=2.)
|
||||
w, h = map(xp.asarray, (w, h)) # XXX convert freqz
|
||||
mask = xp.asarray(w < 0.01)
|
||||
h = xp.asarray(h)
|
||||
assert xp.sum(xp.astype(mask, xp.int64)) > 3
|
||||
habs = xp.abs(h[mask])
|
||||
xp_assert_close(habs, xp.ones_like(habs), atol=1e-4)
|
||||
mask = xp.asarray(w > 0.99)
|
||||
assert xp.sum(xp.astype(mask, xp.int64)) > 3
|
||||
habs = xp.abs(h[mask])
|
||||
xp_assert_close(habs, xp.zeros_like(habs), atol=1e-4)
|
||||
|
||||
def test_fs_validation(self):
|
||||
with pytest.raises(ValueError, match="Sampling.*single scalar"):
|
||||
firls(11, .1, 1, fs=np.array([10, 20]))
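
# Illustrative sketch (not part of the original test suite): firls() takes an
# even-length list of band edges plus the desired amplitude at each edge and
# returns the least-squares optimal linear-phase taps; numtaps must be odd.
# The band edges below are made up for illustration; the helper name is
# illustrative only.
def _example_firls_lowpass():
    import numpy as np
    from scipy.signal import firls, freqz

    taps = firls(31, [0.0, 0.4, 0.6, 1.0], [1.0, 1.0, 0.0, 0.0])
    w, H = freqz(taps, worN=512)
    passband = np.abs(H[w / np.pi < 0.3])
    assert np.all(np.abs(passband - 1.0) < 0.05)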
|
||||
|
||||
class TestMinimumPhase:
|
||||
|
||||
@pytest.mark.thread_unsafe
|
||||
def test_bad_args(self):
|
||||
# not enough taps
|
||||
assert_raises(ValueError, minimum_phase, [1.])
|
||||
assert_raises(ValueError, minimum_phase, [1., 1.])
|
||||
assert_raises(ValueError, minimum_phase, np.full(10, 1j))
|
||||
assert_raises((ValueError, TypeError), minimum_phase, 'foo')
|
||||
assert_raises(ValueError, minimum_phase, np.ones(10), n_fft=8)
|
||||
assert_raises(ValueError, minimum_phase, np.ones(10), method='foo')
|
||||
assert_warns(RuntimeWarning, minimum_phase, np.arange(3))
|
||||
with pytest.raises(ValueError, match="is only supported when"):
|
||||
minimum_phase(np.ones(3), method='hilbert', half=False)
|
||||
|
||||
def test_homomorphic(self):
|
||||
# check that it can recover frequency responses of arbitrary
|
||||
# linear-phase filters
|
||||
|
||||
# for some cases we can get the actual filter back
|
||||
h = [1, -1]
|
||||
h_new = minimum_phase(np.convolve(h, h[::-1]))
|
||||
xp_assert_close(h_new, np.asarray(h, dtype=np.float64), rtol=0.05)
|
||||
|
||||
# but in general we only guarantee we get the magnitude back
|
||||
rng = np.random.RandomState(0)
|
||||
for n in (2, 3, 10, 11, 15, 16, 17, 20, 21, 100, 101):
|
||||
h = rng.randn(n)
|
||||
h_linear = np.convolve(h, h[::-1])
|
||||
h_new = minimum_phase(h_linear)
|
||||
xp_assert_close(np.abs(fft(h_new)), np.abs(fft(h)), rtol=1e-4)
|
||||
h_new = minimum_phase(h_linear, half=False)
|
||||
assert len(h_linear) == len(h_new)
|
||||
xp_assert_close(np.abs(fft(h_new)), np.abs(fft(h_linear)), rtol=1e-4)
|
||||
|
||||
@skip_xp_backends("dask.array", reason="too slow")
|
||||
@skip_xp_backends("jax.numpy", reason="immutable arrays")
|
||||
def test_hilbert(self, xp):
|
||||
# compare to MATLAB output of reference implementation
|
||||
|
||||
# f=[0 0.3 0.5 1];
|
||||
# a=[1 1 0 0];
|
||||
# h=remez(11,f,a);
|
||||
h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.)
|
||||
k = [0.349585548646686, 0.373552164395447, 0.326082685363438,
|
||||
0.077152207480935, -0.129943946349364, -0.059355880509749]
|
||||
h = xp.asarray(h)
|
||||
k = xp.asarray(k, dtype=xp.float64)
|
||||
m = minimum_phase(h, 'hilbert')
|
||||
xp_assert_close(m, k, rtol=5e-3)
|
||||
|
||||
# f=[0 0.8 0.9 1];
|
||||
# a=[0 0 1 1];
|
||||
# h=remez(20,f,a);
|
||||
h = remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.)
|
||||
k = [0.232486803906329, -0.133551833687071, 0.151871456867244,
|
||||
-0.157957283165866, 0.151739294892963, -0.129293146705090,
|
||||
0.100787844523204, -0.065832656741252, 0.035361328741024,
|
||||
-0.014977068692269, -0.158416139047557]
|
||||
h = xp.asarray(h)
|
||||
k = xp.asarray(k, dtype=xp.float64)
|
||||
m = minimum_phase(h, 'hilbert', n_fft=2**19)
|
||||
xp_assert_close(m, k, rtol=2e-3)
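
# Illustrative sketch (not part of the original test suite): for a symmetric
# (linear-phase) input of length 2*N - 1, minimum_phase() returns roughly N
# taps whose magnitude response approximates that of the length-N factor,
# which is the property test_homomorphic above checks.  The helper name is
# illustrative only.
def _example_minimum_phase_magnitude():
    import numpy as np
    from scipy.fft import fft
    from scipy.signal import minimum_phase

    rng = np.random.default_rng(0)
    h = rng.standard_normal(16)
    h_linear = np.convolve(h, h[::-1])       # symmetric, magnitude |H|**2
    h_min = minimum_phase(h_linear)          # ~half the length, magnitude |H|
    assert len(h_min) == (len(h_linear) + 1) // 2
    np.testing.assert_allclose(np.abs(fft(h_min)), np.abs(fft(h)),
                               rtol=2e-2, atol=1e-3)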
|
||||
|
||||
|
||||
class Testfirwin_2d:
|
||||
def test_invalid_args(self):
|
||||
with pytest.raises(ValueError,
|
||||
match="hsize must be a 2-element tuple or list"):
|
||||
firwin_2d((50,), window=(("kaiser", 5.0), "boxcar"), fc=0.4)
|
||||
|
||||
with pytest.raises(ValueError,
|
||||
match="window must be a 2-element tuple or list"):
|
||||
firwin_2d((51, 51), window=("hamming",), fc=0.5)
|
||||
|
||||
with pytest.raises(ValueError,
|
||||
match="window must be a 2-element tuple or list"):
|
||||
firwin_2d((51, 51), window="invalid_window", fc=0.5)
|
||||
|
||||
def test_filter_design(self):
|
||||
hsize = (51, 51)
|
||||
window = (("kaiser", 8.0), ("kaiser", 8.0))
|
||||
fc = 0.4
|
||||
taps_kaiser = firwin_2d(hsize, window, fc=fc)
|
||||
assert taps_kaiser.shape == (51, 51)
|
||||
|
||||
window = ("hamming", "hamming")
|
||||
taps_hamming = firwin_2d(hsize, window, fc=fc)
|
||||
assert taps_hamming.shape == (51, 51)
|
||||
|
||||
def test_impulse_response(self):
|
||||
hsize = (31, 31)
|
||||
window = ("hamming", "hamming")
|
||||
fc = 0.4
|
||||
taps = firwin_2d(hsize, window, fc=fc)
|
||||
|
||||
impulse = np.zeros((63, 63))
|
||||
impulse[31, 31] = 1
|
||||
|
||||
response = convolve2d(impulse, taps, mode='same')
|
||||
|
||||
expected_response = taps
|
||||
xp_assert_close(response[16:47, 16:47], expected_response, rtol=1e-5)
|
||||
|
||||
def test_frequency_response(self):
|
||||
"""Compare 1d and 2d frequency response. """
|
||||
hsize = (31, 31)
|
||||
windows = ("hamming", "hamming")
|
||||
fc = 0.4
|
||||
taps_1d = firwin(numtaps=hsize[0], cutoff=fc, window=windows[0])
|
||||
taps_2d = firwin_2d(hsize, windows, fc=fc)
|
||||
|
||||
f_resp_1d = fft(taps_1d)
|
||||
f_resp_2d = fft2(taps_2d)
|
||||
|
||||
xp_assert_close(f_resp_2d[0, :], f_resp_1d,
|
||||
err_msg='DC Gain at (0, f1) is not unity!')
|
||||
xp_assert_close(f_resp_2d[:, 0], f_resp_1d,
|
||||
err_msg='DC Gain at (f0, 0) is not unity!')
|
||||
xp_assert_close(f_resp_2d, np.outer(f_resp_1d, f_resp_1d),
|
||||
atol=np.finfo(f_resp_2d.dtype).resolution,
|
||||
err_msg='2d frequency response is not product of 1d responses')
|
||||
|
||||
def test_symmetry(self):
|
||||
hsize = (51, 51)
|
||||
window = ("hamming", "hamming")
|
||||
fc = 0.4
|
||||
taps = firwin_2d(hsize, window, fc=fc)
|
||||
xp_assert_close(taps, np.flip(taps), rtol=1e-5)
|
||||
|
||||
def test_circular_symmetry(self):
|
||||
hsize = (51, 51)
|
||||
window = "hamming"
|
||||
taps = firwin_2d(hsize, window, circular=True, fc=0.5)
|
||||
center = hsize[0] // 2
|
||||
for i in range(hsize[0]):
|
||||
for j in range(hsize[1]):
|
||||
xp_assert_close(taps[i, j],
|
||||
taps[center - (i - center), center - (j - center)],
|
||||
rtol=1e-5)
|
||||
|
||||
def test_edge_case_circular(self):
|
||||
hsize = (3, 3)
|
||||
window = "hamming"
|
||||
taps_small = firwin_2d(hsize, window, circular=True, fc=0.5)
|
||||
assert taps_small.shape == (3, 3)
|
||||
|
||||
hsize = (101, 101)
|
||||
taps_large = firwin_2d(hsize, window, circular=True, fc=0.5)
|
||||
assert taps_large.shape == (101, 101)
|
||||
|
||||
def test_known_result(self):
|
||||
hsize = (5, 5)
|
||||
window = ('kaiser', 8.0)
|
||||
fc = 0.1
|
||||
fs = 2
|
||||
|
||||
row_filter = firwin(hsize[0], cutoff=fc, window=window, fs=fs)
|
||||
col_filter = firwin(hsize[1], cutoff=fc, window=window, fs=fs)
|
||||
known_result = np.outer(row_filter, col_filter)
|
||||
|
||||
taps = firwin_2d(hsize, (window, window), fc=fc)
|
||||
assert taps.shape == known_result.shape, (
|
||||
f"Shape mismatch: {taps.shape} vs {known_result.shape}"
|
||||
)
|
||||
assert np.allclose(taps, known_result, rtol=1e-1), (
|
||||
f"Filter shape mismatch: {taps} vs {known_result}"
|
||||
)
|
||||
venv/lib/python3.13/site-packages/scipy/signal/tests/test_ltisys.py (new file, 1225 lines; diff suppressed because it is too large)
|
|
@ -0,0 +1,71 @@
|
|||
import numpy as np
|
||||
from pytest import raises as assert_raises
|
||||
from scipy._lib._array_api import xp_assert_close, xp_assert_equal
|
||||
|
||||
from numpy.fft import fft, ifft
|
||||
|
||||
from scipy.signal import max_len_seq
|
||||
|
||||
|
||||
class TestMLS:
|
||||
|
||||
def test_mls_inputs(self):
|
||||
# can't all be zero state
|
||||
assert_raises(ValueError, max_len_seq,
|
||||
10, state=np.zeros(10))
|
||||
# wrong size state
|
||||
assert_raises(ValueError, max_len_seq, 10,
|
||||
state=np.ones(3))
|
||||
# wrong length
|
||||
assert_raises(ValueError, max_len_seq, 10, length=-1)
|
||||
xp_assert_equal(max_len_seq(10, length=0)[0],
|
||||
np.asarray([], dtype=np.int8)
|
||||
)
|
||||
# unknown taps
|
||||
assert_raises(ValueError, max_len_seq, 64)
|
||||
# bad taps
|
||||
assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1])
|
||||
|
||||
def test_mls_output(self):
|
||||
# define some alternate working taps
|
||||
alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4],
|
||||
8: [7, 5, 3]}
|
||||
# assume the other bit levels work, too slow to test higher orders...
|
||||
for nbits in range(2, 8):
|
||||
for state in [None, np.round(np.random.rand(nbits))]:
|
||||
for taps in [None, alt_taps[nbits]]:
|
||||
if state is not None and np.all(state == 0):
|
||||
state[0] = 1 # they can't all be zero
|
||||
orig_m = max_len_seq(nbits, state=state,
|
||||
taps=taps)[0]
|
||||
m = 2. * orig_m - 1. # convert to +/- 1 representation
|
||||
# First, make sure we got all 1's or -1's
|
||||
err_msg = "mls had non binary terms"
|
||||
xp_assert_equal(np.abs(m), np.ones_like(m),
|
||||
err_msg=err_msg)
|
||||
# Test via circular cross-correlation, which is just mult.
|
||||
# in the frequency domain with one signal conjugated
|
||||
tester = np.real(ifft(fft(m) * np.conj(fft(m))))
|
||||
out_len = 2**nbits - 1
|
||||
# impulse amplitude == out_len
|
||||
err_msg = "mls impulse has incorrect value"
|
||||
xp_assert_close(tester[0],
|
||||
float(out_len),
|
||||
err_msg=err_msg
|
||||
)
|
||||
# steady-state is -1
|
||||
err_msg = "mls steady-state has incorrect value"
|
||||
xp_assert_close(tester[1:],
|
||||
np.full(out_len - 1, -1, dtype=tester.dtype),
|
||||
err_msg=err_msg)
|
||||
# let's do the split thing using a couple options
|
||||
for n in (1, 2**(nbits - 1)):
|
||||
m1, s1 = max_len_seq(nbits, state=state, taps=taps,
|
||||
length=n)
|
||||
m2, s2 = max_len_seq(nbits, state=s1, taps=taps,
|
||||
length=1)
|
||||
m3, s3 = max_len_seq(nbits, state=s2, taps=taps,
|
||||
length=out_len - n - 1)
|
||||
new_m = np.concatenate((m1, m2, m3))
|
||||
xp_assert_equal(orig_m, new_m)
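# --- Illustrative sketch (editorial addition, not part of the test file) ----
# The two properties checked above in miniature: the circular autocorrelation
# of a maximum length sequence (mapped to +/-1) equals 2**nbits - 1 at lag 0
# and -1 at every other lag, and a sequence can be produced piecewise by
# feeding the returned state back into max_len_seq(). The helper name is
# hypothetical and used only for illustration.
def _demo_mls_properties(nbits=6):
    import numpy as np
    from numpy.fft import fft, ifft
    from scipy.signal import max_len_seq

    seq, _ = max_len_seq(nbits)             # full period of 2**nbits - 1 samples
    m = 2. * seq - 1.                       # map {0, 1} -> {-1, +1}
    acorr = np.real(ifft(fft(m) * np.conj(fft(m))))
    out_len = 2 ** nbits - 1
    assert np.isclose(acorr[0], out_len)    # impulse at lag 0
    assert np.allclose(acorr[1:], -1.)      # flat -1 at every other lag

    # piecewise generation: continue from the returned state
    first, mid_state = max_len_seq(nbits, length=10)
    rest, _ = max_len_seq(nbits, state=mid_state, length=out_len - 10)
    assert np.array_equal(np.concatenate([first, rest]), seq)
    return acorr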
|
||||
|
||||
|
|
@ -0,0 +1,915 @@
|
|||
import copy
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
from pytest import raises, warns
|
||||
from scipy._lib._array_api import xp_assert_close, xp_assert_equal
|
||||
|
||||
from scipy.signal._peak_finding import (
|
||||
argrelmax,
|
||||
argrelmin,
|
||||
peak_prominences,
|
||||
peak_widths,
|
||||
_unpack_condition_args,
|
||||
find_peaks,
|
||||
find_peaks_cwt,
|
||||
_identify_ridge_lines
|
||||
)
|
||||
from scipy.signal.windows import gaussian
|
||||
from scipy.signal._peak_finding_utils import _local_maxima_1d, PeakPropertyWarning
|
||||
|
||||
|
||||
def _gen_gaussians(center_locs, sigmas, total_length):
|
||||
xdata = np.arange(0, total_length).astype(float)
|
||||
out_data = np.zeros(total_length, dtype=float)
|
||||
for ind, sigma in enumerate(sigmas):
|
||||
tmp = (xdata - center_locs[ind]) / sigma
|
||||
out_data += np.exp(-(tmp**2))
|
||||
return out_data
|
||||
|
||||
|
||||
def _gen_gaussians_even(sigmas, total_length):
|
||||
num_peaks = len(sigmas)
|
||||
delta = total_length / (num_peaks + 1)
|
||||
center_locs = np.linspace(delta, total_length - delta, num=num_peaks).astype(int)
|
||||
out_data = _gen_gaussians(center_locs, sigmas, total_length)
|
||||
return out_data, center_locs
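# --- Illustrative sketch (editorial addition, not part of the test file) ----
# What the helper above produces: a 1-D signal that is the sum of Gaussian
# bumps with the requested widths, their centres spaced evenly over the signal,
# plus the integer centre locations used below as the "true" peak positions.
# The helper name is hypothetical and used only for illustration.
def _demo_gen_gaussians_even():
    sigmas = [5.0, 10.0]
    data, centers = _gen_gaussians_even(sigmas, 200)
    assert data.shape == (200,)
    # with well separated centres, every centre is a local maximum
    assert all(data[c] >= data[c - 1] and data[c] >= data[c + 1] for c in centers)
    return centers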
|
||||
|
||||
|
||||
def _gen_ridge_line(start_locs, max_locs, length, distances, gaps):
|
||||
"""
|
||||
Generate coordinates for a ridge line.
|
||||
|
||||
Will be a series of coordinates, starting at start_loc (length 2).
|
||||
The maximum distance between any adjacent columns will be
|
||||
`max_distance`, the max distance between adjacent rows
|
||||
will be `max_gap`.
|
||||
|
||||
`max_locs` should be the size of the intended matrix. The
|
||||
ending coordinates are guaranteed to be less than `max_locs`,
|
||||
although they may not approach `max_locs` at all.
|
||||
"""
|
||||
|
||||
def keep_bounds(num, max_val):
|
||||
out = max(num, 0)
|
||||
out = min(out, max_val)
|
||||
return out
|
||||
|
||||
gaps = copy.deepcopy(gaps)
|
||||
distances = copy.deepcopy(distances)
|
||||
|
||||
locs = np.zeros([length, 2], dtype=int)
|
||||
locs[0, :] = start_locs
|
||||
total_length = max_locs[0] - start_locs[0] - sum(gaps)
|
||||
if total_length < length:
|
||||
raise ValueError('Cannot generate ridge line according to constraints')
|
||||
dist_int = length / len(distances) - 1
|
||||
gap_int = length / len(gaps) - 1
|
||||
for ind in range(1, length):
|
||||
nextcol = locs[ind - 1, 1]
|
||||
nextrow = locs[ind - 1, 0] + 1
|
||||
if (ind % dist_int == 0) and (len(distances) > 0):
|
||||
nextcol += ((-1)**ind)*distances.pop()
|
||||
if (ind % gap_int == 0) and (len(gaps) > 0):
|
||||
nextrow += gaps.pop()
|
||||
nextrow = keep_bounds(nextrow, max_locs[0])
|
||||
nextcol = keep_bounds(nextcol, max_locs[1])
|
||||
locs[ind, :] = [nextrow, nextcol]
|
||||
|
||||
return [locs[:, 0], locs[:, 1]]
|
||||
|
||||
|
||||
class TestLocalMaxima1d:
|
||||
|
||||
def test_empty(self):
|
||||
"""Test with empty signal."""
|
||||
x = np.array([], dtype=np.float64)
|
||||
for array in _local_maxima_1d(x):
|
||||
xp_assert_equal(array, np.array([]), check_dtype=False)
|
||||
assert array.base is None
|
||||
|
||||
def test_linear(self):
|
||||
"""Test with linear signal."""
|
||||
x = np.linspace(0, 100)
|
||||
for array in _local_maxima_1d(x):
|
||||
xp_assert_equal(array, np.array([], dtype=np.intp))
|
||||
assert array.base is None
|
||||
|
||||
def test_simple(self):
|
||||
"""Test with simple signal."""
|
||||
x = np.linspace(-10, 10, 50)
|
||||
x[2::3] += 1
|
||||
expected = np.arange(2, 50, 3, dtype=np.intp)
|
||||
for array in _local_maxima_1d(x):
|
||||
# For plateaus of size 1, the edges are identical with the
|
||||
# midpoints
|
||||
xp_assert_equal(array, expected, check_dtype=False)
|
||||
assert array.base is None
|
||||
|
||||
def test_flat_maxima(self):
|
||||
"""Test if flat maxima are detected correctly."""
|
||||
x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 2.99, 4, 4, 4, 4, -10,
|
||||
-5, -5, -5, -5, -5, -10])
|
||||
midpoints, left_edges, right_edges = _local_maxima_1d(x)
|
||||
xp_assert_equal(midpoints, np.array([2, 4, 8, 12, 18]), check_dtype=False)
|
||||
xp_assert_equal(left_edges, np.array([2, 4, 7, 11, 16]), check_dtype=False)
|
||||
xp_assert_equal(right_edges, np.array([2, 5, 9, 14, 20]), check_dtype=False)
|
||||
|
||||
@pytest.mark.parametrize('x', [
|
||||
np.array([1., 0, 2]),
|
||||
np.array([3., 3, 0, 4, 4]),
|
||||
np.array([5., 5, 5, 0, 6, 6, 6]),
|
||||
])
|
||||
def test_signal_edges(self, x):
|
||||
"""Test if behavior on signal edges is correct."""
|
||||
for array in _local_maxima_1d(x):
|
||||
xp_assert_equal(array, np.array([], dtype=np.intp))
|
||||
assert array.base is None
|
||||
|
||||
def test_exceptions(self):
|
||||
"""Test input validation and raised exceptions."""
|
||||
with raises(ValueError, match="wrong number of dimensions"):
|
||||
_local_maxima_1d(np.ones((1, 1)))
|
||||
with raises(ValueError, match="expected 'const float64_t'"):
|
||||
_local_maxima_1d(np.ones(1, dtype=int))
|
||||
with raises(TypeError, match="list"):
|
||||
_local_maxima_1d([1., 2.])
|
||||
with raises(TypeError, match="'x' must not be None"):
|
||||
_local_maxima_1d(None)
|
||||
|
||||
|
||||
class TestRidgeLines:
|
||||
|
||||
def test_empty(self):
|
||||
test_matr = np.zeros([20, 100])
|
||||
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
|
||||
assert len(lines) == 0
|
||||
|
||||
def test_minimal(self):
|
||||
test_matr = np.zeros([20, 100])
|
||||
test_matr[0, 10] = 1
|
||||
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
|
||||
assert len(lines) == 1
|
||||
|
||||
test_matr = np.zeros([20, 100])
|
||||
test_matr[0:2, 10] = 1
|
||||
lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1)
|
||||
assert len(lines) == 1
|
||||
|
||||
def test_single_pass(self):
|
||||
distances = [0, 1, 2, 5]
|
||||
gaps = [0, 1, 2, 0, 1]
|
||||
test_matr = np.zeros([20, 50]) + 1e-12
|
||||
length = 12
|
||||
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
|
||||
test_matr[line[0], line[1]] = 1
|
||||
max_distances = np.full(20, max(distances))
|
||||
identified_lines = _identify_ridge_lines(test_matr,
|
||||
max_distances,
|
||||
max(gaps) + 1)
|
||||
assert len(identified_lines) == 1
|
||||
for iline_, line_ in zip(identified_lines[0], line):
|
||||
xp_assert_equal(iline_, line_, check_dtype=False)
|
||||
|
||||
def test_single_bigdist(self):
|
||||
distances = [0, 1, 2, 5]
|
||||
gaps = [0, 1, 2, 4]
|
||||
test_matr = np.zeros([20, 50])
|
||||
length = 12
|
||||
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
|
||||
test_matr[line[0], line[1]] = 1
|
||||
max_dist = 3
|
||||
max_distances = np.full(20, max_dist)
|
||||
#This should get 2 lines, since the distance is too large
|
||||
identified_lines = _identify_ridge_lines(test_matr,
|
||||
max_distances,
|
||||
max(gaps) + 1)
|
||||
assert len(identified_lines) == 2
|
||||
|
||||
for iline in identified_lines:
|
||||
adists = np.diff(iline[1])
|
||||
np.testing.assert_array_less(np.abs(adists), max_dist)
|
||||
|
||||
agaps = np.diff(iline[0])
|
||||
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
|
||||
|
||||
def test_single_biggap(self):
|
||||
distances = [0, 1, 2, 5]
|
||||
max_gap = 3
|
||||
gaps = [0, 4, 2, 1]
|
||||
test_matr = np.zeros([20, 50])
|
||||
length = 12
|
||||
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
|
||||
test_matr[line[0], line[1]] = 1
|
||||
max_dist = 6
|
||||
max_distances = np.full(20, max_dist)
|
||||
#This should get 2 lines, since the gap is too large
|
||||
identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
|
||||
assert len(identified_lines) == 2
|
||||
|
||||
for iline in identified_lines:
|
||||
adists = np.diff(iline[1])
|
||||
np.testing.assert_array_less(np.abs(adists), max_dist)
|
||||
|
||||
agaps = np.diff(iline[0])
|
||||
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
|
||||
|
||||
def test_single_biggaps(self):
|
||||
distances = [0]
|
||||
max_gap = 1
|
||||
gaps = [3, 6]
|
||||
test_matr = np.zeros([50, 50])
|
||||
length = 30
|
||||
line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps)
|
||||
test_matr[line[0], line[1]] = 1
|
||||
max_dist = 1
|
||||
max_distances = np.full(50, max_dist)
|
||||
#This should get 3 lines, since the gaps are too large
|
||||
identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap)
|
||||
assert len(identified_lines) == 3
|
||||
|
||||
for iline in identified_lines:
|
||||
adists = np.diff(iline[1])
|
||||
np.testing.assert_array_less(np.abs(adists), max_dist)
|
||||
|
||||
agaps = np.diff(iline[0])
|
||||
np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1)
|
||||
|
||||
|
||||
class TestArgrel:
|
||||
|
||||
def test_empty(self):
|
||||
# Regression test for gh-2832.
|
||||
# When there are no relative extrema, make sure that
|
||||
# the number of empty arrays returned matches the
|
||||
# dimension of the input.
|
||||
|
||||
empty_array = np.array([], dtype=int)
|
||||
|
||||
z1 = np.zeros(5)
|
||||
|
||||
i = argrelmin(z1)
|
||||
xp_assert_equal(len(i), 1)
|
||||
xp_assert_equal(i[0], empty_array, check_dtype=False)
|
||||
|
||||
z2 = np.zeros((3, 5))
|
||||
|
||||
row, col = argrelmin(z2, axis=0)
|
||||
xp_assert_equal(row, empty_array, check_dtype=False)
|
||||
xp_assert_equal(col, empty_array, check_dtype=False)
|
||||
|
||||
row, col = argrelmin(z2, axis=1)
|
||||
xp_assert_equal(row, empty_array, check_dtype=False)
|
||||
xp_assert_equal(col, empty_array, check_dtype=False)
|
||||
|
||||
def test_basic(self):
|
||||
# Note: the docstrings for the argrel{min,max,extrema} functions
|
||||
# do not give a guarantee of the order of the indices, so we'll
|
||||
# sort them before testing.
|
||||
|
||||
x = np.array([[1, 2, 2, 3, 2],
|
||||
[2, 1, 2, 2, 3],
|
||||
[3, 2, 1, 2, 2],
|
||||
[2, 3, 2, 1, 2],
|
||||
[1, 2, 3, 2, 1]])
|
||||
|
||||
row, col = argrelmax(x, axis=0)
|
||||
order = np.argsort(row)
|
||||
xp_assert_equal(row[order], [1, 2, 3], check_dtype=False)
|
||||
xp_assert_equal(col[order], [4, 0, 1], check_dtype=False)
|
||||
|
||||
row, col = argrelmax(x, axis=1)
|
||||
order = np.argsort(row)
|
||||
xp_assert_equal(row[order], [0, 3, 4], check_dtype=False)
|
||||
xp_assert_equal(col[order], [3, 1, 2], check_dtype=False)
|
||||
|
||||
row, col = argrelmin(x, axis=0)
|
||||
order = np.argsort(row)
|
||||
xp_assert_equal(row[order], [1, 2, 3], check_dtype=False)
|
||||
xp_assert_equal(col[order], [1, 2, 3], check_dtype=False)
|
||||
|
||||
row, col = argrelmin(x, axis=1)
|
||||
order = np.argsort(row)
|
||||
xp_assert_equal(row[order], [1, 2, 3], check_dtype=False)
|
||||
xp_assert_equal(col[order], [1, 2, 3], check_dtype=False)
|
||||
|
||||
def test_highorder(self):
|
||||
order = 2
|
||||
sigmas = [1.0, 2.0, 10.0, 5.0, 15.0]
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, 500)
|
||||
test_data[act_locs + order] = test_data[act_locs]*0.99999
|
||||
test_data[act_locs - order] = test_data[act_locs]*0.99999
|
||||
rel_max_locs = argrelmax(test_data, order=order, mode='clip')[0]
|
||||
|
||||
assert len(rel_max_locs) == len(act_locs)
|
||||
assert (rel_max_locs == act_locs).all()
|
||||
|
||||
def test_2d_gaussians(self):
|
||||
sigmas = [1.0, 2.0, 10.0]
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, 100)
|
||||
rot_factor = 20
|
||||
rot_range = np.arange(0, len(test_data)) - rot_factor
|
||||
test_data_2 = np.vstack([test_data, test_data[rot_range]])
|
||||
rel_max_rows, rel_max_cols = argrelmax(test_data_2, axis=1, order=1)
|
||||
|
||||
for rw in range(0, test_data_2.shape[0]):
|
||||
inds = (rel_max_rows == rw)
|
||||
|
||||
assert len(rel_max_cols[inds]) == len(act_locs)
|
||||
assert (act_locs == (rel_max_cols[inds] - rot_factor*rw)).all()
|
||||
|
||||
|
||||
class TestPeakProminences:
|
||||
|
||||
def test_empty(self):
|
||||
"""
|
||||
Test if an empty array is returned if no peaks are provided.
|
||||
"""
|
||||
out = peak_prominences([1, 2, 3], [])
|
||||
for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
|
||||
assert arr.size == 0
|
||||
assert arr.dtype == dtype
|
||||
|
||||
out = peak_prominences([], [])
|
||||
for arr, dtype in zip(out, [np.float64, np.intp, np.intp]):
|
||||
assert arr.size == 0
|
||||
assert arr.dtype == dtype
|
||||
|
||||
def test_basic(self):
|
||||
"""
|
||||
Test if height of prominences is correctly calculated in signal with
|
||||
rising baseline (peak widths are 1 sample).
|
||||
"""
|
||||
# Prepare basic signal
|
||||
x = np.array([-1, 1.2, 1.2, 1, 3.2, 1.3, 2.88, 2.1])
|
||||
peaks = np.array([1, 2, 4, 6])
|
||||
lbases = np.array([0, 0, 0, 5])
|
||||
rbases = np.array([3, 3, 5, 7])
|
||||
proms = x[peaks] - np.max([x[lbases], x[rbases]], axis=0)
|
||||
# Test if calculation matches handcrafted result
|
||||
out = peak_prominences(x, peaks)
|
||||
xp_assert_equal(out[0], proms, check_dtype=False)
|
||||
xp_assert_equal(out[1], lbases, check_dtype=False)
|
||||
xp_assert_equal(out[2], rbases, check_dtype=False)
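# --- Illustrative sketch (editorial addition, not part of the test file) ----
# A hand-checkable example of what peak_prominences() returns: for each peak,
# the vertical distance to its lowest contour line plus the indices of the left
# and right bases that bound that contour. The helper name is hypothetical and
# the expected numbers were worked out by hand for this tiny signal.
def _demo_peak_prominences():
    import numpy as np
    from scipy.signal import peak_prominences

    x = np.array([0., 5., 1., 3., 0.])
    prominences, left_bases, right_bases = peak_prominences(x, [1, 3])
    # the tall peak at index 1 spans the whole signal; the small peak at
    # index 3 is bounded on the left by the higher peak, so its contour sits
    # on the valley at index 2 (prominence 3 - 1 = 2)
    assert np.allclose(prominences, [5., 2.])
    assert list(left_bases) == [0, 2] and list(right_bases) == [4, 4]
    return prominences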
|
||||
|
||||
def test_edge_cases(self):
|
||||
"""
|
||||
Test edge cases.
|
||||
"""
|
||||
# Peaks have same height, prominence and bases
|
||||
x = [0, 2, 1, 2, 1, 2, 0]
|
||||
peaks = [1, 3, 5]
|
||||
proms, lbases, rbases = peak_prominences(x, peaks)
|
||||
xp_assert_equal(proms, np.asarray([2.0, 2, 2]), check_dtype=False)
|
||||
xp_assert_equal(lbases, [0, 0, 0], check_dtype=False)
|
||||
xp_assert_equal(rbases, [6, 6, 6], check_dtype=False)
|
||||
|
||||
# Peaks have same height & prominence but different bases
|
||||
x = [0, 1, 0, 1, 0, 1, 0]
|
||||
peaks = np.array([1, 3, 5])
|
||||
proms, lbases, rbases = peak_prominences(x, peaks)
|
||||
xp_assert_equal(proms, np.asarray([1.0, 1, 1]))
|
||||
xp_assert_equal(lbases, peaks - 1, check_dtype=False)
|
||||
xp_assert_equal(rbases, peaks + 1, check_dtype=False)
|
||||
|
||||
def test_non_contiguous(self):
|
||||
"""
|
||||
Test with non-C-contiguous input arrays.
|
||||
"""
|
||||
x = np.repeat([-9, 9, 9, 0, 3, 1], 2)
|
||||
peaks = np.repeat([1, 2, 4], 2)
|
||||
proms, lbases, rbases = peak_prominences(x[::2], peaks[::2])
|
||||
xp_assert_equal(proms, np.asarray([9.0, 9, 2]))
|
||||
xp_assert_equal(lbases, [0, 0, 3], check_dtype=False)
|
||||
xp_assert_equal(rbases, [3, 3, 5], check_dtype=False)
|
||||
|
||||
def test_wlen(self):
|
||||
"""
|
||||
Test if wlen actually shrinks the evaluation range correctly.
|
||||
"""
|
||||
x = [0, 1, 2, 3, 1, 0, -1]
|
||||
peak = [3]
|
||||
# Test rounding behavior of wlen
|
||||
proms = peak_prominences(x, peak)
|
||||
for prom, val in zip(proms, [3.0, 0, 6]):
|
||||
assert prom == val
|
||||
|
||||
for wlen, i in [(8, 0), (7, 0), (6, 0), (5, 1), (3.2, 1), (3, 2), (1.1, 2)]:
|
||||
proms = peak_prominences(x, peak, wlen)
|
||||
for prom, val in zip(proms, [3. - i, 0 + i, 6 - i]):
|
||||
assert prom == val
|
||||
|
||||
def test_exceptions(self):
|
||||
"""
|
||||
Verify that exceptions and warnings are raised.
|
||||
"""
|
||||
# x with dimension > 1
|
||||
with raises(ValueError, match='1-D array'):
|
||||
peak_prominences([[0, 1, 1, 0]], [1, 2])
|
||||
# peaks with dimension > 1
|
||||
with raises(ValueError, match='1-D array'):
|
||||
peak_prominences([0, 1, 1, 0], [[1, 2]])
|
||||
# x with dimension < 1
|
||||
with raises(ValueError, match='1-D array'):
|
||||
peak_prominences(3, [0,])
|
||||
|
||||
# empty x with peaks supplied
|
||||
with raises(ValueError, match='not a valid index'):
|
||||
peak_prominences([], [0])
|
||||
# invalid indices with non-empty x
|
||||
for p in [-100, -1, 3, 1000]:
|
||||
with raises(ValueError, match='not a valid index'):
|
||||
peak_prominences([1, 0, 2], [p])
|
||||
|
||||
# peaks is not cast-able to np.intp
|
||||
with raises(TypeError, match='cannot safely cast'):
|
||||
peak_prominences([0, 1, 1, 0], [1.1, 2.3])
|
||||
|
||||
# wlen < 3
|
||||
with raises(ValueError, match='wlen'):
|
||||
peak_prominences(np.arange(10), [3, 5], wlen=1)
|
||||
|
||||
@pytest.mark.thread_unsafe
|
||||
def test_warnings(self):
|
||||
"""
|
||||
Verify that appropriate warnings are raised.
|
||||
"""
|
||||
msg = "some peaks have a prominence of 0"
|
||||
for p in [0, 1, 2]:
|
||||
with warns(PeakPropertyWarning, match=msg):
|
||||
peak_prominences([1, 0, 2], [p,])
|
||||
with warns(PeakPropertyWarning, match=msg):
|
||||
peak_prominences([0, 1, 1, 1, 0], [2], wlen=2)
|
||||
|
||||
|
||||
class TestPeakWidths:
|
||||
|
||||
def test_empty(self):
|
||||
"""
|
||||
Test if an empty array is returned if no peaks are provided.
|
||||
"""
|
||||
widths = peak_widths([], [])[0]
|
||||
assert isinstance(widths, np.ndarray)
|
||||
assert widths.size == 0
|
||||
widths = peak_widths([1, 2, 3], [])[0]
|
||||
assert isinstance(widths, np.ndarray)
|
||||
assert widths.size == 0
|
||||
out = peak_widths([], [])
|
||||
for arr in out:
|
||||
assert isinstance(arr, np.ndarray)
|
||||
assert arr.size == 0
|
||||
|
||||
@pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
|
||||
def test_basic(self):
|
||||
"""
|
||||
Test a simple use case with easy to verify results at different relative
|
||||
heights.
|
||||
"""
|
||||
x = np.array([1, 0, 1, 2, 1, 0, -1])
|
||||
prominence = 2
|
||||
for rel_height, width_true, lip_true, rip_true in [
|
||||
(0., 0., 3., 3.), # raises warning
|
||||
(0.25, 1., 2.5, 3.5),
|
||||
(0.5, 2., 2., 4.),
|
||||
(0.75, 3., 1.5, 4.5),
|
||||
(1., 4., 1., 5.),
|
||||
(2., 5., 1., 6.),
|
||||
(3., 5., 1., 6.)
|
||||
]:
|
||||
width_calc, height, lip_calc, rip_calc = peak_widths(
|
||||
x, [3], rel_height)
|
||||
xp_assert_close(width_calc, np.asarray([width_true]))
|
||||
xp_assert_close(height, np.asarray([2 - rel_height * prominence]))
|
||||
xp_assert_close(lip_calc, np.asarray([lip_true]))
|
||||
xp_assert_close(rip_calc, np.asarray([rip_true]))
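# --- Illustrative sketch (editorial addition, not part of the test file) ----
# The rule behind the table above: peak_widths() measures the width at the
# evaluation height peak_height - rel_height * prominence, so rel_height=0.5
# is the classic "full width at half prominence". The helper name is
# hypothetical and used only for illustration.
def _demo_peak_widths_half_prominence():
    import numpy as np
    from scipy.signal import peak_widths

    x = np.array([1., 0., 1., 2., 1., 0., -1.])   # single peak at index 3
    widths, heights, left_ips, right_ips = peak_widths(x, [3], rel_height=0.5)
    # the prominence is 2, so the evaluation height is 2 - 0.5 * 2 = 1
    assert np.allclose(heights, [1.0])
    assert np.allclose(widths, [2.0])
    assert np.allclose(left_ips, [2.0]) and np.allclose(right_ips, [4.0])
    return widths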
|
||||
|
||||
def test_non_contiguous(self):
|
||||
"""
|
||||
Test with non-C-contiguous input arrays.
|
||||
"""
|
||||
x = np.repeat([0, 100, 50], 4)
|
||||
peaks = np.repeat([1], 3)
|
||||
result = peak_widths(x[::4], peaks[::3])
|
||||
xp_assert_equal(result,
|
||||
np.asarray([[0.75], [75], [0.75], [1.5]])
|
||||
)
|
||||
|
||||
def test_exceptions(self):
|
||||
"""
|
||||
Verify that argument validation works as intended.
|
||||
"""
|
||||
with raises(ValueError, match='1-D array'):
|
||||
# x with dimension > 1
|
||||
peak_widths(np.zeros((3, 4)), np.ones(3))
|
||||
with raises(ValueError, match='1-D array'):
|
||||
# x with dimension < 1
|
||||
peak_widths(3, [0])
|
||||
with raises(ValueError, match='1-D array'):
|
||||
# peaks with dimension > 1
|
||||
peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp))
|
||||
with raises(ValueError, match='1-D array'):
|
||||
# peaks with dimension < 1
|
||||
peak_widths(np.arange(10), 3)
|
||||
with raises(ValueError, match='not a valid index'):
|
||||
# peak pos exceeds x.size
|
||||
peak_widths(np.arange(10), [8, 11])
|
||||
with raises(ValueError, match='not a valid index'):
|
||||
# empty x with peaks supplied
|
||||
peak_widths([], [1, 2])
|
||||
with raises(TypeError, match='cannot safely cast'):
|
||||
# peak cannot be safely cast to intp
|
||||
peak_widths(np.arange(10), [1.1, 2.3])
|
||||
with raises(ValueError, match='rel_height'):
|
||||
# rel_height is < 0
|
||||
peak_widths([0, 1, 0, 1, 0], [1, 3], rel_height=-1)
|
||||
with raises(TypeError, match='None'):
|
||||
# prominence data contains None
|
||||
peak_widths([1, 2, 1], [1], prominence_data=(None, None, None))
|
||||
|
||||
@pytest.mark.thread_unsafe
|
||||
def test_warnings(self):
|
||||
"""
|
||||
Verify that appropriate warnings are raised.
|
||||
"""
|
||||
msg = "some peaks have a width of 0"
|
||||
with warns(PeakPropertyWarning, match=msg):
|
||||
# Case: rel_height is 0
|
||||
peak_widths([0, 1, 0], [1], rel_height=0)
|
||||
with warns(PeakPropertyWarning, match=msg):
|
||||
# Case: prominence is 0 and bases are identical
|
||||
peak_widths(
|
||||
[0, 1, 1, 1, 0], [2],
|
||||
prominence_data=(np.array([0.], np.float64),
|
||||
np.array([2], np.intp),
|
||||
np.array([2], np.intp))
|
||||
)
|
||||
|
||||
def test_mismatching_prominence_data(self):
|
||||
"""Test with mismatching peak and / or prominence data."""
|
||||
x = [0, 1, 0]
|
||||
peak = [1]
|
||||
for i, (prominences, left_bases, right_bases) in enumerate([
|
||||
((1.,), (-1,), (2,)), # left base not in x
|
||||
((1.,), (0,), (3,)), # right base not in x
|
||||
((1.,), (2,), (0,)), # swapped bases same as peak
|
||||
((1., 1.), (0, 0), (2, 2)), # array shapes don't match peaks
|
||||
((1., 1.), (0,), (2,)), # arrays with different shapes
|
||||
((1.,), (0, 0), (2,)), # arrays with different shapes
|
||||
((1.,), (0,), (2, 2)) # arrays with different shapes
|
||||
]):
|
||||
# Make sure input matches the output of signal.peak_prominences
|
||||
prominence_data = (np.array(prominences, dtype=np.float64),
|
||||
np.array(left_bases, dtype=np.intp),
|
||||
np.array(right_bases, dtype=np.intp))
|
||||
# Test for correct exception
|
||||
if i < 3:
|
||||
match = "prominence data is invalid for peak"
|
||||
else:
|
||||
match = "arrays in `prominence_data` must have the same shape"
|
||||
with raises(ValueError, match=match):
|
||||
peak_widths(x, peak, prominence_data=prominence_data)
|
||||
|
||||
@pytest.mark.filterwarnings("ignore:some peaks have a width of 0")
|
||||
def test_intersection_rules(self):
|
||||
"""Test if x == eval_height counts as an intersection."""
|
||||
# Flat peak with two possible intersection points if evaluated at 1
|
||||
x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0]
|
||||
# relative height is 0 -> width is 0 as well, raises warning
|
||||
xp_assert_close(peak_widths(x, peaks=[5], rel_height=0),
|
||||
[(0.,), (3.,), (5.,), (5.,)])
|
||||
# width_height == x counts as intersection -> nearest 1 is chosen
|
||||
xp_assert_close(peak_widths(x, peaks=[5], rel_height=2/3),
|
||||
[(4.,), (1.,), (3.,), (7.,)])
|
||||
|
||||
|
||||
def test_unpack_condition_args():
|
||||
"""
|
||||
Verify parsing of condition arguments for `scipy.signal.find_peaks` function.
|
||||
"""
|
||||
x = np.arange(10)
|
||||
amin_true = x
|
||||
amax_true = amin_true + 10
|
||||
peaks = amin_true[1::2]
|
||||
|
||||
# Test unpacking with None or interval
|
||||
assert (None, None) == _unpack_condition_args((None, None), x, peaks)
|
||||
assert (1, None) == _unpack_condition_args(1, x, peaks)
|
||||
assert (1, None) == _unpack_condition_args((1, None), x, peaks)
|
||||
assert (None, 2) == _unpack_condition_args((None, 2), x, peaks)
|
||||
assert (3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks)
|
||||
|
||||
# Test if borders are correctly reduced with `peaks`
|
||||
amin_calc, amax_calc = _unpack_condition_args((amin_true, amax_true), x, peaks)
|
||||
xp_assert_equal(amin_calc, amin_true[peaks])
|
||||
xp_assert_equal(amax_calc, amax_true[peaks])
|
||||
|
||||
# Test raises if array borders don't match x
|
||||
with raises(ValueError, match="array size of lower"):
|
||||
_unpack_condition_args(amin_true, np.arange(11), peaks)
|
||||
with raises(ValueError, match="array size of upper"):
|
||||
_unpack_condition_args((None, amin_true), np.arange(11), peaks)
|
||||
|
||||
|
||||
class TestFindPeaks:
|
||||
|
||||
# Keys of optionally returned properties
|
||||
property_keys = {'peak_heights', 'left_thresholds', 'right_thresholds',
|
||||
'prominences', 'left_bases', 'right_bases', 'widths',
|
||||
'width_heights', 'left_ips', 'right_ips'}
|
||||
|
||||
def test_constant(self):
|
||||
"""
|
||||
Test behavior for signal without local maxima.
|
||||
"""
|
||||
open_interval = (None, None)
|
||||
peaks, props = find_peaks(np.ones(10),
|
||||
height=open_interval, threshold=open_interval,
|
||||
prominence=open_interval, width=open_interval)
|
||||
assert peaks.size == 0
|
||||
for key in self.property_keys:
|
||||
assert props[key].size == 0
|
||||
|
||||
def test_plateau_size(self):
|
||||
"""
|
||||
Test plateau size condition for peaks.
|
||||
"""
|
||||
# Prepare a signal whose peaks have peak_height == plateau_size
|
||||
plateau_sizes = np.array([1, 2, 3, 4, 8, 20, 111])
|
||||
x = np.zeros(plateau_sizes.size * 2 + 1)
|
||||
x[1::2] = plateau_sizes
|
||||
repeats = np.ones(x.size, dtype=int)
|
||||
repeats[1::2] = x[1::2]
|
||||
x = np.repeat(x, repeats)
|
||||
|
||||
# Test full output
|
||||
peaks, props = find_peaks(x, plateau_size=(None, None))
|
||||
xp_assert_equal(peaks, [1, 3, 7, 11, 18, 33, 100], check_dtype=False)
|
||||
xp_assert_equal(props["plateau_sizes"], plateau_sizes, check_dtype=False)
|
||||
xp_assert_equal(props["left_edges"], peaks - (plateau_sizes - 1) // 2,
|
||||
check_dtype=False)
|
||||
xp_assert_equal(props["right_edges"], peaks + plateau_sizes // 2,
|
||||
check_dtype=False)
|
||||
|
||||
# Test conditions
|
||||
xp_assert_equal(find_peaks(x, plateau_size=4)[0], [11, 18, 33, 100],
|
||||
check_dtype=False)
|
||||
xp_assert_equal(find_peaks(x, plateau_size=(None, 3.5))[0], [1, 3, 7],
|
||||
check_dtype=False)
|
||||
xp_assert_equal(find_peaks(x, plateau_size=(5, 50))[0], [18, 33],
|
||||
check_dtype=False)
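# --- Illustrative sketch (editorial addition, not part of the test file) ----
# The signal construction used above in miniature: np.repeat() stretches every
# second sample into a plateau of the requested length, and find_peaks() can
# then select peaks by plateau size. The helper name is hypothetical and used
# only for illustration.
def _demo_plateau_size():
    import numpy as np
    from scipy.signal import find_peaks

    plateau_sizes = np.array([1, 3, 5])
    x = np.zeros(plateau_sizes.size * 2 + 1)
    x[1::2] = plateau_sizes                      # peak height == plateau size
    repeats = np.ones(x.size, dtype=int)
    repeats[1::2] = plateau_sizes
    x = np.repeat(x, repeats)                    # [0, 1, 0, 3, 3, 3, 0, 5, 5, 5, 5, 5, 0]
    peaks, props = find_peaks(x, plateau_size=(2, None))
    assert np.array_equal(props["plateau_sizes"], [3, 5])
    return peaks, props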
|
||||
|
||||
def test_height_condition(self):
|
||||
"""
|
||||
Test height condition for peaks.
|
||||
"""
|
||||
x = (0., 1/3, 0., 2.5, 0, 4., 0)
|
||||
peaks, props = find_peaks(x, height=(None, None))
|
||||
xp_assert_equal(peaks, np.array([1, 3, 5]), check_dtype=False)
|
||||
xp_assert_equal(props['peak_heights'], np.array([1/3, 2.5, 4.]),
|
||||
check_dtype=False)
|
||||
xp_assert_equal(find_peaks(x, height=0.5)[0], np.array([3, 5]),
|
||||
check_dtype=False)
|
||||
xp_assert_equal(find_peaks(x, height=(None, 3))[0], np.array([1, 3]),
|
||||
check_dtype=False)
|
||||
xp_assert_equal(find_peaks(x, height=(2, 3))[0], np.array([3]),
|
||||
check_dtype=False)
|
||||
|
||||
def test_threshold_condition(self):
|
||||
"""
|
||||
Test threshold condition for peaks.
|
||||
"""
|
||||
x = (0, 2, 1, 4, -1)
|
||||
peaks, props = find_peaks(x, threshold=(None, None))
|
||||
xp_assert_equal(peaks, np.array([1, 3]), check_dtype=False)
|
||||
xp_assert_equal(props['left_thresholds'], np.array([2.0, 3.0]))
|
||||
xp_assert_equal(props['right_thresholds'], np.array([1.0, 5.0]))
|
||||
xp_assert_equal(find_peaks(x, threshold=2)[0], np.array([3]),
|
||||
check_dtype=False)
|
||||
xp_assert_equal(find_peaks(x, threshold=3.5)[0], np.array([], dtype=int),
|
||||
check_dtype=False)
|
||||
xp_assert_equal(find_peaks(x, threshold=(None, 5))[0], np.array([1, 3]),
|
||||
check_dtype=False)
|
||||
xp_assert_equal(find_peaks(x, threshold=(None, 4))[0], np.array([1]),
|
||||
check_dtype=False)
|
||||
xp_assert_equal(find_peaks(x, threshold=(2, 4))[0], np.array([], dtype=int),
|
||||
check_dtype=False)
|
||||
|
||||
def test_distance_condition(self):
|
||||
"""
|
||||
Test distance condition for peaks.
|
||||
"""
|
||||
# Peaks of different height with constant distance 3
|
||||
peaks_all = np.arange(1, 21, 3)
|
||||
x = np.zeros(21)
|
||||
x[peaks_all] += np.linspace(1, 2, peaks_all.size)
|
||||
|
||||
# Test if peaks with "minimal" distance are still selected (distance = 3)
|
||||
xp_assert_equal(find_peaks(x, distance=3)[0], peaks_all, check_dtype=False)
|
||||
|
||||
# Select every second peak (distance > 3)
|
||||
peaks_subset = find_peaks(x, distance=3.0001)[0]
|
||||
# Test if peaks_subset is subset of peaks_all
|
||||
assert np.setdiff1d(peaks_subset, peaks_all, assume_unique=True).size == 0
|
||||
|
||||
# Test if every second peak was removed
|
||||
dfs = np.diff(peaks_subset)
|
||||
xp_assert_equal(dfs, 6*np.ones_like(dfs))
|
||||
|
||||
# Test priority of peak removal
|
||||
x = [-2, 1, -1, 0, -3]
|
||||
peaks_subset = find_peaks(x, distance=10)[0] # use distance > x size
|
||||
assert peaks_subset.size == 1 and peaks_subset[0] == 1
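# --- Illustrative sketch (editorial addition, not part of the test file) ----
# What the assertion above relies on: when the distance condition forces peaks
# to be removed, find_peaks() discards the lower peaks first, so with a
# distance larger than the whole signal only the tallest local maximum
# survives. The helper name is hypothetical and used only for illustration.
def _demo_distance_priority():
    import numpy as np
    from scipy.signal import find_peaks

    x = np.array([-2., 1., -1., 0., -3.])      # local maxima at indices 1 and 3
    peaks, _ = find_peaks(x, distance=10)       # distance exceeds the signal length
    assert np.array_equal(peaks, [1])           # the higher peak (x[1] == 1) wins
    return peaks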
|
||||
|
||||
def test_prominence_condition(self):
|
||||
"""
|
||||
Test prominence condition for peaks.
|
||||
"""
|
||||
x = np.linspace(0, 10, 100)
|
||||
peaks_true = np.arange(1, 99, 2)
|
||||
offset = np.linspace(1, 10, peaks_true.size)
|
||||
x[peaks_true] += offset
|
||||
prominences = x[peaks_true] - x[peaks_true + 1]
|
||||
interval = (3, 9)
|
||||
keep = np.nonzero(
|
||||
(interval[0] <= prominences) & (prominences <= interval[1]))
|
||||
|
||||
peaks_calc, properties = find_peaks(x, prominence=interval)
|
||||
xp_assert_equal(peaks_calc, peaks_true[keep], check_dtype=False)
|
||||
xp_assert_equal(properties['prominences'], prominences[keep], check_dtype=False)
|
||||
xp_assert_equal(properties['left_bases'],
|
||||
np.zeros_like(properties['left_bases']))
|
||||
xp_assert_equal(properties['right_bases'], peaks_true[keep] + 1,
|
||||
check_dtype=False)
|
||||
|
||||
def test_width_condition(self):
|
||||
"""
|
||||
Test width condition for peaks.
|
||||
"""
|
||||
x = np.array([1, 0, 1, 2, 1, 0, -1, 4, 0])
|
||||
peaks, props = find_peaks(x, width=(None, 2), rel_height=0.75)
|
||||
assert peaks.size == 1
|
||||
xp_assert_equal(peaks, 7*np.ones_like(peaks))
|
||||
xp_assert_close(props['widths'], np.asarray([1.35]))
|
||||
xp_assert_close(props['width_heights'], np.asarray([1.]))
|
||||
xp_assert_close(props['left_ips'], np.asarray([6.4]))
|
||||
xp_assert_close(props['right_ips'], np.asarray([7.75]))
|
||||
|
||||
def test_properties(self):
|
||||
"""
|
||||
Test returned properties.
|
||||
"""
|
||||
open_interval = (None, None)
|
||||
x = [0, 1, 0, 2, 1.5, 0, 3, 0, 5, 9]
|
||||
peaks, props = find_peaks(x,
|
||||
height=open_interval, threshold=open_interval,
|
||||
prominence=open_interval, width=open_interval)
|
||||
assert len(props) == len(self.property_keys)
|
||||
for key in self.property_keys:
|
||||
assert peaks.size == props[key].size
|
||||
|
||||
def test_raises(self):
|
||||
"""
|
||||
Test exceptions raised by function.
|
||||
"""
|
||||
with raises(ValueError, match="1-D array"):
|
||||
find_peaks(np.array(1))
|
||||
with raises(ValueError, match="1-D array"):
|
||||
find_peaks(np.ones((2, 2)))
|
||||
with raises(ValueError, match="distance"):
|
||||
find_peaks(np.arange(10), distance=-1)
|
||||
|
||||
@pytest.mark.filterwarnings("ignore:some peaks have a prominence of 0",
|
||||
"ignore:some peaks have a width of 0")
|
||||
def test_wlen_smaller_plateau(self):
|
||||
"""
|
||||
Test behavior of prominence and width calculation if the given window
|
||||
length is smaller than a peak's plateau size.
|
||||
|
||||
Regression test for gh-9110.
|
||||
"""
|
||||
peaks, props = find_peaks([0, 1, 1, 1, 0], prominence=(None, None),
|
||||
width=(None, None), wlen=2)
|
||||
xp_assert_equal(peaks, 2 * np.ones_like(peaks))
|
||||
xp_assert_equal(props["prominences"], np.zeros_like(props["prominences"]))
|
||||
xp_assert_equal(props["widths"], np.zeros_like(props["widths"]))
|
||||
xp_assert_equal(props["width_heights"], np.ones_like(props["width_heights"]))
|
||||
for key in ("left_bases", "right_bases", "left_ips", "right_ips"):
|
||||
xp_assert_equal(props[key], peaks, check_dtype=False)
|
||||
|
||||
@pytest.mark.parametrize("kwargs", [
|
||||
{},
|
||||
{"distance": 3.0},
|
||||
{"prominence": (None, None)},
|
||||
{"width": (None, 2)},
|
||||
|
||||
])
|
||||
def test_readonly_array(self, kwargs):
|
||||
"""
|
||||
Test readonly arrays are accepted.
|
||||
"""
|
||||
x = np.linspace(0, 10, 15)
|
||||
x_readonly = x.copy()
|
||||
x_readonly.flags.writeable = False
|
||||
|
||||
peaks, _ = find_peaks(x)
|
||||
peaks_readonly, _ = find_peaks(x_readonly, **kwargs)
|
||||
|
||||
xp_assert_close(peaks, peaks_readonly)
|
||||
|
||||
|
||||
class TestFindPeaksCwt:
|
||||
|
||||
def test_find_peaks_exact(self):
|
||||
"""
|
||||
Generate a series of gaussians and attempt to find the peak locations.
|
||||
"""
|
||||
sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
|
||||
num_points = 500
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
|
||||
widths = np.arange(0.1, max(sigmas))
|
||||
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0,
|
||||
min_length=None)
|
||||
xp_assert_equal(found_locs, act_locs,
|
||||
check_dtype=False,
|
||||
err_msg="Found maximum locations did not equal those expected"
|
||||
)
|
||||
|
||||
def test_find_peaks_withnoise(self):
|
||||
"""
|
||||
Verify that peak locations are (approximately) found
|
||||
for a series of gaussians with added noise.
|
||||
"""
|
||||
sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0]
|
||||
num_points = 500
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
|
||||
widths = np.arange(0.1, max(sigmas))
|
||||
noise_amp = 0.07
|
||||
np.random.seed(18181911)
|
||||
test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp)
|
||||
found_locs = find_peaks_cwt(test_data, widths, min_length=15,
|
||||
gap_thresh=1, min_snr=noise_amp / 5)
|
||||
|
||||
err_msg = 'Different number of peaks found than expected'
|
||||
assert len(found_locs) == len(act_locs), err_msg
|
||||
diffs = np.abs(found_locs - act_locs)
|
||||
max_diffs = np.array(sigmas) / 5
|
||||
np.testing.assert_array_less(diffs, max_diffs, 'Maximum location differed ' +
|
||||
f'by more than {max_diffs}')
|
||||
|
||||
def test_find_peaks_nopeak(self):
|
||||
"""
|
||||
Verify that no peak is found in
|
||||
data that's just noise.
|
||||
"""
|
||||
noise_amp = 1.0
|
||||
num_points = 100
|
||||
rng = np.random.RandomState(181819141)
|
||||
test_data = (rng.rand(num_points) - 0.5)*(2*noise_amp)
|
||||
widths = np.arange(10, 50)
|
||||
found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30)
|
||||
assert len(found_locs) == 0
|
||||
|
||||
def test_find_peaks_with_non_default_wavelets(self):
|
||||
x = gaussian(200, 2)
|
||||
widths = np.array([1, 2, 3, 4])
|
||||
a = find_peaks_cwt(x, widths, wavelet=gaussian)
|
||||
|
||||
xp_assert_equal(a, np.asarray([100]), check_dtype=False)
|
||||
|
||||
def test_find_peaks_window_size(self):
|
||||
"""
|
||||
Verify that window_size is passed correctly to private function and
|
||||
affects the result.
|
||||
"""
|
||||
sigmas = [2.0, 2.0]
|
||||
num_points = 1000
|
||||
test_data, act_locs = _gen_gaussians_even(sigmas, num_points)
|
||||
widths = np.arange(0.1, max(sigmas), 0.2)
|
||||
noise_amp = 0.05
|
||||
rng = np.random.RandomState(18181911)
|
||||
test_data += (rng.rand(num_points) - 0.5)*(2*noise_amp)
|
||||
|
||||
# Possibly contrived negative region to throw off peak finding
|
||||
# when window_size is too large
|
||||
test_data[250:320] -= 1
|
||||
|
||||
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3,
|
||||
min_length=None, window_size=None)
|
||||
with pytest.raises(AssertionError):
|
||||
assert found_locs.size == act_locs.size
|
||||
|
||||
found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3,
|
||||
min_length=None, window_size=20)
|
||||
assert found_locs.size == act_locs.size
|
||||
|
||||
def test_find_peaks_with_one_width(self):
|
||||
"""
|
||||
Verify that the `width` argument
|
||||
in `find_peaks_cwt` can be a float
|
||||
"""
|
||||
xs = np.arange(0, np.pi, 0.05)
|
||||
test_data = np.sin(xs)
|
||||
widths = 1
|
||||
found_locs = find_peaks_cwt(test_data, widths)
|
||||
|
||||
np.testing.assert_equal(found_locs, 32)
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
# Regression tests on result types of some signal functions
|
||||
|
||||
import numpy as np
|
||||
|
||||
from scipy.signal import (decimate,
|
||||
lfilter_zi,
|
||||
lfiltic,
|
||||
sos2tf,
|
||||
sosfilt_zi)
|
||||
|
||||
|
||||
def test_decimate():
|
||||
ones_f32 = np.ones(32, dtype=np.float32)
|
||||
assert decimate(ones_f32, 2).dtype == np.float32
|
||||
|
||||
ones_i64 = np.ones(32, dtype=np.int64)
|
||||
assert decimate(ones_i64, 2).dtype == np.float64
|
||||
|
||||
|
||||
def test_lfilter_zi():
|
||||
b_f32 = np.array([1, 2, 3], dtype=np.float32)
|
||||
a_f32 = np.array([4, 5, 6], dtype=np.float32)
|
||||
assert lfilter_zi(b_f32, a_f32).dtype == np.float32
|
||||
|
||||
|
||||
def test_lfiltic():
|
||||
# this would return f32 when given a mix of f32 / f64 args
|
||||
b_f32 = np.array([1, 2, 3], dtype=np.float32)
|
||||
a_f32 = np.array([4, 5, 6], dtype=np.float32)
|
||||
x_f32 = np.ones(32, dtype=np.float32)
|
||||
|
||||
b_f64 = b_f32.astype(np.float64)
|
||||
a_f64 = a_f32.astype(np.float64)
|
||||
x_f64 = x_f32.astype(np.float64)
|
||||
|
||||
assert lfiltic(b_f64, a_f32, x_f32).dtype == np.float64
|
||||
assert lfiltic(b_f32, a_f64, x_f32).dtype == np.float64
|
||||
assert lfiltic(b_f32, a_f32, x_f64).dtype == np.float64
|
||||
assert lfiltic(b_f32, a_f32, x_f32, x_f64).dtype == np.float64
|
||||
|
||||
|
||||
def test_sos2tf():
|
||||
sos_f32 = np.array([[4, 5, 6, 1, 2, 3]], dtype=np.float32)
|
||||
b, a = sos2tf(sos_f32)
|
||||
assert b.dtype == np.float32
|
||||
assert a.dtype == np.float32
|
||||
|
||||
|
||||
def test_sosfilt_zi():
|
||||
sos_f32 = np.array([[4, 5, 6, 1, 2, 3]], dtype=np.float32)
|
||||
assert sosfilt_zi(sos_f32).dtype == np.float32
|
||||
|
|
@ -0,0 +1,363 @@
|
|||
import pytest
|
||||
import numpy as np
|
||||
from numpy.testing import (assert_equal,
|
||||
assert_array_equal,
|
||||
)
|
||||
|
||||
from scipy._lib._array_api import (
|
||||
assert_almost_equal, assert_array_almost_equal, xp_assert_close
|
||||
)
|
||||
|
||||
from scipy.ndimage import convolve1d # type: ignore[attr-defined]
|
||||
|
||||
from scipy.signal import savgol_coeffs, savgol_filter
|
||||
from scipy.signal._savitzky_golay import _polyder
|
||||
|
||||
|
||||
def check_polyder(p, m, expected):
|
||||
dp = _polyder(p, m)
|
||||
assert_array_equal(dp, expected)
|
||||
|
||||
|
||||
def test_polyder():
|
||||
cases = [
|
||||
([5], 0, [5]),
|
||||
([5], 1, [0]),
|
||||
([3, 2, 1], 0, [3, 2, 1]),
|
||||
([3, 2, 1], 1, [6, 2]),
|
||||
([3, 2, 1], 2, [6]),
|
||||
([3, 2, 1], 3, [0]),
|
||||
([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]),
|
||||
([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]),
|
||||
([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]),
|
||||
([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]),
|
||||
]
|
||||
for p, m, expected in cases:
|
||||
check_polyder(np.array(p).T, m, np.array(expected).T)
|
||||
|
||||
|
||||
#--------------------------------------------------------------------
|
||||
# savgol_coeffs tests
|
||||
#--------------------------------------------------------------------
|
||||
|
||||
def alt_sg_coeffs(window_length, polyorder, pos):
|
||||
"""This is an alternative implementation of the SG coefficients.
|
||||
|
||||
It uses numpy.polyfit and numpy.polyval. The results should be
|
||||
equivalent to those of savgol_coeffs(), but this implementation
|
||||
is slower.
|
||||
|
||||
window_length should be odd.
|
||||
|
||||
"""
|
||||
if pos is None:
|
||||
pos = window_length // 2
|
||||
t = np.arange(window_length)
|
||||
unit = (t == pos).astype(int)
|
||||
h = np.polyval(np.polyfit(t, unit, polyorder), t)
|
||||
return h
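# --- Illustrative sketch (editorial addition, not part of the test file) ----
# The identity the comparison tests further below rely on: fitting a polynomial
# to a unit impulse placed at `pos` and evaluating the fit over the whole
# window (what alt_sg_coeffs() does) gives the same weights as
# savgol_coeffs(..., use='dot'). The helper name is hypothetical and used only
# for illustration.
def _demo_alt_sg_coeffs_agreement():
    import numpy as np
    from scipy.signal import savgol_coeffs

    window_length, polyorder, pos = 5, 2, 2
    h_ref = savgol_coeffs(window_length, polyorder, pos=pos, use='dot')
    h_alt = alt_sg_coeffs(window_length, polyorder, pos)
    assert np.allclose(h_ref, h_alt, atol=1e-10)
    return h_ref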
|
||||
|
||||
|
||||
def test_sg_coeffs_trivial():
|
||||
# Test a trivial case of savgol_coeffs: polyorder = window_length - 1
|
||||
h = savgol_coeffs(1, 0)
|
||||
xp_assert_close(h, [1.0])
|
||||
|
||||
h = savgol_coeffs(3, 2)
|
||||
xp_assert_close(h, [0.0, 1, 0], atol=1e-10)
|
||||
|
||||
h = savgol_coeffs(5, 4)
|
||||
xp_assert_close(h, [0.0, 0, 1, 0, 0], atol=1e-10)
|
||||
|
||||
h = savgol_coeffs(5, 4, pos=1)
|
||||
xp_assert_close(h, [0.0, 0, 0, 1, 0], atol=1e-10)
|
||||
|
||||
h = savgol_coeffs(5, 4, pos=1, use='dot')
|
||||
xp_assert_close(h, [0.0, 1, 0, 0, 0], atol=1e-10)
|
||||
|
||||
|
||||
def compare_coeffs_to_alt(window_length, order):
|
||||
# For the given window_length and order, compare the results
|
||||
# of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1.
|
||||
# Also include pos=None.
|
||||
for pos in [None] + list(range(window_length)):
|
||||
h1 = savgol_coeffs(window_length, order, pos=pos, use='dot')
|
||||
h2 = alt_sg_coeffs(window_length, order, pos=pos)
|
||||
xp_assert_close(
|
||||
h1, h2, atol=1e-10,
|
||||
err_msg=f"window_length = {window_length}, order = {order}, pos = {pos}"
|
||||
)
|
||||
|
||||
|
||||
def test_sg_coeffs_compare():
|
||||
# Compare savgol_coeffs() to alt_sg_coeffs().
|
||||
for window_length in range(1, 8, 2):
|
||||
for order in range(window_length):
|
||||
compare_coeffs_to_alt(window_length, order)
|
||||
|
||||
|
||||
def test_sg_coeffs_exact():
|
||||
polyorder = 4
|
||||
window_length = 9
|
||||
halflen = window_length // 2
|
||||
|
||||
x = np.linspace(0, 21, 43)
|
||||
delta = x[1] - x[0]
|
||||
|
||||
# The data is a cubic polynomial. We'll use an order 4
|
||||
# SG filter, so the filtered values should equal the input data
|
||||
# (except within half window_length of the edges).
|
||||
y = 0.5 * x ** 3 - x
|
||||
h = savgol_coeffs(window_length, polyorder)
|
||||
y0 = convolve1d(y, h)
|
||||
xp_assert_close(y0[halflen:-halflen], y[halflen:-halflen])
|
||||
|
||||
# Check the same input, but use deriv=1. dy is the exact result.
|
||||
dy = 1.5 * x ** 2 - 1
|
||||
h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta)
|
||||
y1 = convolve1d(y, h)
|
||||
xp_assert_close(y1[halflen:-halflen], dy[halflen:-halflen])
|
||||
|
||||
# Check the same input, but use deriv=2. d2y is the exact result.
|
||||
d2y = 3.0 * x
|
||||
h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta)
|
||||
y2 = convolve1d(y, h)
|
||||
xp_assert_close(y2[halflen:-halflen], d2y[halflen:-halflen])
|
||||
|
||||
|
||||
def test_sg_coeffs_deriv():
|
||||
# The data in `x` is a sampled parabola, so using savgol_coeffs with an
|
||||
# order 2 or higher polynomial should give exact results.
|
||||
i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0])
|
||||
x = i ** 2 / 4
|
||||
dx = i / 2
|
||||
d2x = np.full_like(i, 0.5)
|
||||
for pos in range(x.size):
|
||||
coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot')
|
||||
xp_assert_close(coeffs0.dot(x), x[pos], atol=1e-10)
|
||||
coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1)
|
||||
xp_assert_close(coeffs1.dot(x), dx[pos], atol=1e-10)
|
||||
coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2)
|
||||
xp_assert_close(coeffs2.dot(x), d2x[pos], atol=1e-10)
|
||||
|
||||
|
||||
def test_sg_coeffs_deriv_gt_polyorder():
|
||||
"""
|
||||
If deriv > polyorder, the coefficients should be all 0.
|
||||
This is a regression test for a bug where, e.g.,
|
||||
savgol_coeffs(5, polyorder=1, deriv=2)
|
||||
raised an error.
|
||||
"""
|
||||
coeffs = savgol_coeffs(5, polyorder=1, deriv=2)
|
||||
assert_array_equal(coeffs, np.zeros(5))
|
||||
coeffs = savgol_coeffs(7, polyorder=4, deriv=6)
|
||||
assert_array_equal(coeffs, np.zeros(7))
|
||||
|
||||
|
||||
def test_sg_coeffs_large():
|
||||
# Test that for large values of window_length and polyorder the array of
|
||||
# coefficients returned is symmetric. The aim is to ensure that
|
||||
# no potential numeric overflow occurs.
|
||||
coeffs0 = savgol_coeffs(31, 9)
|
||||
assert_array_almost_equal(coeffs0, coeffs0[::-1])
|
||||
coeffs1 = savgol_coeffs(31, 9, deriv=1)
|
||||
assert_array_almost_equal(coeffs1, -coeffs1[::-1])
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# savgol_coeffs tests for even window length
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_sg_coeffs_even_window_length():
|
||||
# Simple case - deriv=0, polyorder=0, 1
|
||||
window_lengths = [4, 6, 8, 10, 12, 14, 16]
|
||||
for length in window_lengths:
|
||||
h_p_d = savgol_coeffs(length, 0, 0)
|
||||
xp_assert_close(h_p_d, np.ones_like(h_p_d) / length)
|
||||
|
||||
# Verify with closed forms
|
||||
# deriv=1, polyorder=1, 2
|
||||
def h_p_d_closed_form_1(k, m):
|
||||
return 6*(k - 0.5)/((2*m + 1)*m*(2*m - 1))
|
||||
|
||||
# deriv=2, polyorder=2
|
||||
def h_p_d_closed_form_2(k, m):
|
||||
numer = 15*(-4*m**2 + 1 + 12*(k - 0.5)**2)
|
||||
denom = 4*(2*m + 1)*(m + 1)*m*(m - 1)*(2*m - 1)
|
||||
return numer/denom
|
||||
|
||||
for length in window_lengths:
|
||||
m = length//2
|
||||
expected_output = [h_p_d_closed_form_1(k, m)
|
||||
for k in range(-m + 1, m + 1)][::-1]
|
||||
actual_output = savgol_coeffs(length, 1, 1)
|
||||
xp_assert_close(expected_output, actual_output)
|
||||
actual_output = savgol_coeffs(length, 2, 1)
|
||||
xp_assert_close(expected_output, actual_output)
|
||||
|
||||
expected_output = [h_p_d_closed_form_2(k, m)
|
||||
for k in range(-m + 1, m + 1)][::-1]
|
||||
actual_output = savgol_coeffs(length, 2, 2)
|
||||
xp_assert_close(expected_output, actual_output)
|
||||
actual_output = savgol_coeffs(length, 3, 2)
|
||||
xp_assert_close(expected_output, actual_output)
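# --- Illustrative sketch (editorial addition, not part of the test file) ----
# A single hand-checkable instance of the deriv=1 closed form used above: for
# an even window of length 4 (m = 2), 6*(k - 0.5)/((2m + 1)*m*(2m - 1)) over
# k = -1..2, reversed into convolution order, gives [0.3, 0.1, -0.1, -0.3],
# and savgol_coeffs() reproduces it. The helper name is hypothetical.
def _demo_even_window_deriv1_closed_form():
    import numpy as np
    from scipy.signal import savgol_coeffs

    m = 2
    expected = [6 * (k - 0.5) / ((2 * m + 1) * m * (2 * m - 1))
                for k in range(-m + 1, m + 1)][::-1]
    assert np.allclose(expected, [0.3, 0.1, -0.1, -0.3])
    assert np.allclose(savgol_coeffs(2 * m, polyorder=1, deriv=1), expected)
    return expected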
|
||||
|
||||
#--------------------------------------------------------------------
|
||||
# savgol_filter tests
|
||||
#--------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_sg_filter_trivial():
|
||||
""" Test some trivial edge cases for savgol_filter()."""
|
||||
x = np.array([1.0])
|
||||
y = savgol_filter(x, 1, 0)
|
||||
assert_equal(y, [1.0])
|
||||
|
||||
# Input is a single value. With a window length of 3 and polyorder 1,
|
||||
# the value in y is from the straight-line fit of (-1,0), (0,3) and
|
||||
# (1, 0) at 0. This is just the average of the three values, hence 1.0.
|
||||
x = np.array([3.0])
|
||||
y = savgol_filter(x, 3, 1, mode='constant')
|
||||
assert_almost_equal(y, [1.0], decimal=15)
|
||||
|
||||
x = np.array([3.0])
|
||||
y = savgol_filter(x, 3, 1, mode='nearest')
|
||||
assert_almost_equal(y, [3.0], decimal=15)
|
||||
|
||||
x = np.array([1.0] * 3)
|
||||
y = savgol_filter(x, 3, 1, mode='wrap')
|
||||
assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15)
|
||||
|
||||
|
||||
def test_sg_filter_basic():
|
||||
# Some basic test cases for savgol_filter().
|
||||
x = np.array([1.0, 2.0, 1.0])
|
||||
y = savgol_filter(x, 3, 1, mode='constant')
|
||||
xp_assert_close(y, [1.0, 4.0 / 3, 1.0])
|
||||
|
||||
y = savgol_filter(x, 3, 1, mode='mirror')
|
||||
xp_assert_close(y, [5.0 / 3, 4.0 / 3, 5.0 / 3])
|
||||
|
||||
y = savgol_filter(x, 3, 1, mode='wrap')
|
||||
xp_assert_close(y, [4.0 / 3, 4.0 / 3, 4.0 / 3])
|
||||
|
||||
|
||||
def test_sg_filter_2d():
|
||||
x = np.array([[1.0, 2.0, 1.0],
|
||||
[2.0, 4.0, 2.0]])
|
||||
expected = np.array([[1.0, 4.0 / 3, 1.0],
|
||||
[2.0, 8.0 / 3, 2.0]])
|
||||
y = savgol_filter(x, 3, 1, mode='constant')
|
||||
xp_assert_close(y, expected)
|
||||
|
||||
y = savgol_filter(x.T, 3, 1, mode='constant', axis=0)
|
||||
xp_assert_close(y, expected.T)
|
||||
|
||||
|
||||
def test_sg_filter_interp_edges():
|
||||
# Another test with low degree polynomial data, for which we can easily
|
||||
# give the exact results. In this test, we use mode='interp', so
|
||||
# savgol_filter should match the exact solution for the entire data set,
|
||||
# including the edges.
|
||||
t = np.linspace(-5, 5, 21)
|
||||
delta = t[1] - t[0]
|
||||
# Polynomial test data.
|
||||
x = np.array([t,
|
||||
3 * t ** 2,
|
||||
t ** 3 - t])
|
||||
dx = np.array([np.ones_like(t),
|
||||
6 * t,
|
||||
3 * t ** 2 - 1.0])
|
||||
d2x = np.array([np.zeros_like(t),
|
||||
np.full_like(t, 6),
|
||||
6 * t])
|
||||
|
||||
window_length = 7
|
||||
|
||||
y = savgol_filter(x, window_length, 3, axis=-1, mode='interp')
|
||||
xp_assert_close(y, x, atol=1e-12)
|
||||
|
||||
y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
|
||||
deriv=1, delta=delta)
|
||||
xp_assert_close(y1, dx, atol=1e-12)
|
||||
|
||||
y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp',
|
||||
deriv=2, delta=delta)
|
||||
xp_assert_close(y2, d2x, atol=1e-12)
|
||||
|
||||
# Transpose everything, and test again with axis=0.
|
||||
|
||||
x = x.T
|
||||
dx = dx.T
|
||||
d2x = d2x.T
|
||||
|
||||
y = savgol_filter(x, window_length, 3, axis=0, mode='interp')
|
||||
xp_assert_close(y, x, atol=1e-12)
|
||||
|
||||
y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
|
||||
deriv=1, delta=delta)
|
||||
xp_assert_close(y1, dx, atol=1e-12)
|
||||
|
||||
y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp',
|
||||
deriv=2, delta=delta)
|
||||
xp_assert_close(y2, d2x, atol=1e-12)
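# --- Illustrative sketch (editorial addition, not part of the test file) ----
# Why mode='interp' is exact here: near the edges the filter fits a polynomial
# of degree `polyorder` to the last full window and evaluates that fit, so any
# polynomial input of degree <= polyorder is reproduced everywhere, edges
# included. The helper name is hypothetical and used only for illustration.
def _demo_interp_mode_exact_on_polynomials():
    import numpy as np
    from scipy.signal import savgol_filter

    t = np.linspace(-5, 5, 21)
    x = t ** 3 - t                               # cubic input, polyorder 3 below
    y = savgol_filter(x, window_length=7, polyorder=3, mode='interp')
    assert np.allclose(y, x, atol=1e-12)
    return y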
|
||||
|
||||
|
||||
def test_sg_filter_interp_edges_3d():
|
||||
# Test mode='interp' with a 3-D array.
|
||||
t = np.linspace(-5, 5, 21)
|
||||
delta = t[1] - t[0]
|
||||
x1 = np.array([t, -t])
|
||||
x2 = np.array([t ** 2, 3 * t ** 2 + 5])
|
||||
x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t])
|
||||
dx1 = np.array([np.ones_like(t), -np.ones_like(t)])
|
||||
dx2 = np.array([2 * t, 6 * t])
|
||||
dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5])
|
||||
|
||||
# z has shape (3, 2, 21)
|
||||
z = np.array([x1, x2, x3])
|
||||
dz = np.array([dx1, dx2, dx3])
|
||||
|
||||
y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta)
|
||||
xp_assert_close(y, z, atol=1e-10)
|
||||
|
||||
dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta)
|
||||
xp_assert_close(dy, dz, atol=1e-10)
|
||||
|
||||
# z has shape (3, 21, 2)
|
||||
z = np.array([x1.T, x2.T, x3.T])
|
||||
dz = np.array([dx1.T, dx2.T, dx3.T])
|
||||
|
||||
y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta)
|
||||
xp_assert_close(y, z, atol=1e-10)
|
||||
|
||||
dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta)
|
||||
xp_assert_close(dy, dz, atol=1e-10)
|
||||
|
||||
# z has shape (21, 3, 2)
|
||||
z = z.swapaxes(0, 1).copy()
|
||||
dz = dz.swapaxes(0, 1).copy()
|
||||
|
||||
y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta)
|
||||
xp_assert_close(y, z, atol=1e-10)
|
||||
|
||||
dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta)
|
||||
xp_assert_close(dy, dz, atol=1e-10)
|
||||
|
||||
|
||||
def test_sg_filter_valid_window_length_3d():
|
||||
"""Tests that the window_length check is using the correct axis."""
|
||||
|
||||
x = np.ones((10, 20, 30))
|
||||
|
||||
savgol_filter(x, window_length=29, polyorder=3, mode='interp')
|
||||
|
||||
with pytest.raises(ValueError, match='window_length must be less than'):
|
||||
# window_length is more than x.shape[-1].
|
||||
savgol_filter(x, window_length=31, polyorder=3, mode='interp')
|
||||
|
||||
savgol_filter(x, window_length=9, polyorder=3, axis=0, mode='interp')
|
||||
|
||||
with pytest.raises(ValueError, match='window_length must be less than'):
|
||||
# window_length is more than x.shape[0].
|
||||
savgol_filter(x, window_length=11, polyorder=3, axis=0, mode='interp')
|
||||
3 file diffs suppressed because they are too large
|
|
@ -0,0 +1,427 @@
|
|||
# pylint: disable=missing-docstring
|
||||
import math
|
||||
import numpy as np
|
||||
import pytest
|
||||
import scipy._lib.array_api_extra as xpx
|
||||
from scipy._lib._array_api import is_cupy, xp_assert_close, xp_default_dtype
|
||||
|
||||
from scipy.signal._spline import (
|
||||
symiirorder1_ic, symiirorder2_ic_fwd, symiirorder2_ic_bwd)
|
||||
from scipy.signal import symiirorder1, symiirorder2
|
||||
|
||||
skip_xp_backends = pytest.mark.skip_xp_backends
|
||||
xfail_xp_backends = pytest.mark.xfail_xp_backends
|
||||
|
||||
|
||||
def npr(xp, *args):
|
||||
return xp.concat(tuple(xpx.atleast_nd(x, ndim=1, xp=xp) for x in args))
|
||||
|
||||
|
||||
def _compute_symiirorder2_bwd_hs(k, cs, rsq, omega):
|
||||
cssq = cs * cs
|
||||
k = np.abs(k)
|
||||
rsupk = np.power(rsq, k / 2.0)
|
||||
|
||||
c0 = (cssq * (1.0 + rsq) / (1.0 - rsq) /
|
||||
(1 - 2 * rsq * np.cos(2 * omega) + rsq * rsq))
|
||||
gamma = (1.0 - rsq) / (1.0 + rsq) / np.tan(omega)
|
||||
return c0 * rsupk * (np.cos(omega * k) + gamma * np.sin(omega * k))
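# Illustrative sketch (not part of the original tests): h_s(k) above decays
# like rsq**(|k|/2), since |cos(x) + gamma*sin(x)| <= sqrt(1 + gamma**2).
# This geometric decay is what justifies truncating the backward
# initial-condition sums used further below. Example values are arbitrary.
def _bwd_hs_decay_sketch(cs=0.5, rsq=0.25, omega=np.pi / 3):
    k = np.arange(60)
    hs = _compute_symiirorder2_bwd_hs(k, cs, rsq, omega)
    c0 = (cs * cs * (1.0 + rsq) / (1.0 - rsq) /
          (1 - 2 * rsq * np.cos(2 * omega) + rsq * rsq))
    gamma = (1.0 - rsq) / (1.0 + rsq) / np.tan(omega)
    bound = c0 * np.sqrt(1 + gamma * gamma) * np.power(rsq, k / 2.0)
    return bool(np.all(np.abs(hs) <= bound + 1e-12))   # True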
|
||||
|
||||
|
||||
class TestSymIIR:
|
||||
|
||||
@skip_xp_backends(np_only=True, reason="_ic functions are private and numpy-only")
|
||||
@pytest.mark.parametrize(
|
||||
'dtype', ['float32', 'float64', 'complex64', 'complex128'])
|
||||
@pytest.mark.parametrize('precision', [-1.0, 0.7, 0.5, 0.25, 0.0075])
|
||||
def test_symiir1_ic(self, dtype, precision, xp):
|
||||
|
||||
dtype = getattr(xp, dtype)
|
||||
|
||||
c_precision = precision
|
||||
if precision <= 0.0 or precision > 1.0:
|
||||
if dtype in {xp.float32, xp.complex64}:
|
||||
c_precision = 1e-6
|
||||
else:
|
||||
c_precision = 1e-11
|
||||
|
||||
# Symmetrical initial conditions for an IIR filter of order 1 are:
|
||||
# x[0] + z1 * \sum_{k = 0}^{n - 1} x[k] * z1^k
|
||||
|
||||
# Check the initial condition for a low-pass filter
|
||||
# with coefficient b = 0.85 on a step signal. The initial condition is
|
||||
# a geometric series: 1 + b * \sum_{k = 0}^{n - 1} u[k] b^k.
|
||||
|
||||
# Finding the initial condition corresponds to
|
||||
# 1. Computing the index n such that b**n < precision, which
|
||||
# corresponds to ceil(log(precision) / log(b))
|
||||
# 2. Computing the geometric series until n, this can be computed
|
||||
# using the partial sum formula: (1 - b**n) / (1 - b)
|
||||
# This holds due to the input being a step signal.
|
||||
b = 0.85
|
||||
n_exp = int(math.ceil(math.log(c_precision) / math.log(b)))
|
||||
expected = xp.asarray([[(1 - b ** n_exp) / (1 - b)]], dtype=dtype)
|
||||
expected = 1 + b * expected
|
||||
|
||||
# Create a step signal of size n + 1
|
||||
x = xp.ones(n_exp + 1, dtype=dtype)
|
||||
xp_assert_close(symiirorder1_ic(x, b, precision), expected,
|
||||
atol=2e-6, rtol=2e-7)
|
||||
|
||||
# Check the conditions for an exponentially decreasing signal with base 2.
# The same conditions hold, as the products 0.5^k * 0.85^k still form
# a geometric series.
|
||||
b_d = xp.asarray(b, dtype=dtype)
|
||||
expected = np.asarray(
|
||||
[[(1 - (0.5 * b_d) ** n_exp) / (1 - (0.5 * b_d))]], dtype=dtype)
|
||||
expected = 1 + b_d * expected
|
||||
|
||||
# Create an exponentially decreasing signal of size n + 1
|
||||
x = 2 ** -xp.arange(n_exp + 1, dtype=dtype)
|
||||
xp_assert_close(symiirorder1_ic(x, b, precision), expected,
|
||||
atol=2e-6, rtol=2e-7)
|
||||
|
||||
@skip_xp_backends(np_only=True, reason="_ic functions are private and numpy-only")
|
||||
def test_symiir1_ic_fails(self, xp):
|
||||
# Test that symiirorder1_ic fails whenever \sum_{k = 1}^{n} b^k > eps
|
||||
b = 0.85
|
||||
# Create a step signal of size 100
|
||||
x = xp.ones(100, dtype=xp.float64)
|
||||
|
||||
# Compute the closed form for the geometrical series
|
||||
precision = 1 / (1 - b)
|
||||
pytest.raises(ValueError, symiirorder1_ic, x, b, precision)
|
||||
|
||||
# Test that symiirorder1_ic fails when |z1| >= 1
|
||||
pytest.raises(ValueError, symiirorder1_ic, x, 1.0, -1)
|
||||
pytest.raises(ValueError, symiirorder1_ic, x, 2.0, -1)
|
||||
|
||||
@skip_xp_backends(
|
||||
cpu_only=True, exceptions=["cupy"], reason="internals are numpy-only"
|
||||
)
|
||||
@xfail_xp_backends("cupy", reason="sum did not converge")
|
||||
@skip_xp_backends("jax.numpy", reason="item assignment in tests")
|
||||
@pytest.mark.parametrize(
|
||||
'dtype', ['float32', 'float64', 'complex64', 'complex128'])
|
||||
@pytest.mark.parametrize('precision', [-1.0, 0.7, 0.5, 0.25, 0.0075])
|
||||
def test_symiir1(self, dtype, precision, xp):
|
||||
dtype = getattr(xp, dtype)
|
||||
|
||||
c_precision = precision
|
||||
if precision <= 0.0 or precision > 1.0:
|
||||
if dtype in {xp.float32, xp.complex64}:
|
||||
c_precision = 1e-6
|
||||
else:
|
||||
c_precision = 1e-11
|
||||
|
||||
# Test for a low-pass filter with c0 = 0.15 and z1 = 0.85
|
||||
# using a unit step over 200 samples.
|
||||
c0 = 0.15
|
||||
z1 = 0.85
|
||||
n = 200
|
||||
signal = xp.ones(n, dtype=dtype)
|
||||
|
||||
# Find the initial condition. See test_symiir1_ic for a detailed
|
||||
# explanation
|
||||
n_exp = int(math.ceil(math.log(c_precision) / math.log(z1)))
|
||||
initial = xp.asarray((1 - z1 ** n_exp) / (1 - z1), dtype=dtype)
|
||||
initial = 1 + z1 * initial
|
||||
|
||||
# Forward pass
|
||||
# The transfer function for the system 1 / (1 - z1 * z^-1) when
|
||||
# applied to a unit step with initial condition y0 is
|
||||
# 1 / (1 - z1 * z^-1) * (z^-1 / (1 - z^-1) + y0)
|
||||
|
||||
# Solving the inverse Z-transform for the given expression yields:
|
||||
# y[k] = y0 * z1**k * u[k] -
#        z1 / (1 - z1) * z1**(k - 1) * u[k - 1] +
#        1 / (1 - z1) * u[k - 1]
# where u is the unit step
|
||||
|
||||
# y0 * z1**k * u[k]
|
||||
pos = xp.astype(xp.arange(n), dtype)
|
||||
comp1 = initial * z1**pos
|
||||
|
||||
# -z1 / (1 - z1) * z1**(k - 1) * u[k - 1]
|
||||
comp2 = xp.zeros(n, dtype=dtype)
|
||||
comp2[1:] = -z1 / (1 - z1) * z1**pos[:-1]
|
||||
|
||||
# 1 / (1 - z1) * u[k - 1]
|
||||
comp3 = xp.zeros(n, dtype=dtype)
|
||||
comp3[1:] = 1 / (1 - z1)
|
||||
|
||||
expected_fwd = comp1 + comp2 + comp3
|
||||
|
||||
# Reverse condition
|
||||
sym_cond = -c0 / (z1 - 1.0) * expected_fwd[-1]
|
||||
|
||||
# Backward pass
|
||||
# The transfer function for the forward result is equivalent to
|
||||
# the forward system times c0 / (1 - z1 * z).
|
||||
|
||||
# Computing a closed form for the complete expression is difficult
|
||||
# The result will be computed iteratively from the difference equation
|
||||
exp_out = xp.zeros(n, dtype=dtype)
|
||||
exp_out[0] = sym_cond
|
||||
|
||||
for i in range(1, n):
|
||||
exp_out[i] = c0 * expected_fwd[n - 1 - i] + z1 * exp_out[i - 1]
|
||||
|
||||
exp_out = xp.flip(exp_out)
|
||||
|
||||
out = symiirorder1(signal, c0, z1, precision)
|
||||
xp_assert_close(out, exp_out, atol=4e-6, rtol=6e-7)
|
||||
|
||||
@xfail_xp_backends("cupy", reason="sum did not converge")
|
||||
@skip_xp_backends(
|
||||
cpu_only=True, exceptions=["cupy"], reason="internals are numpy-only"
|
||||
)
|
||||
@pytest.mark.parametrize('dtype', ['float32', 'float64'])
|
||||
def test_symiir1_values(self, dtype, xp):
|
||||
rng = np.random.RandomState(1234)
|
||||
s = rng.uniform(size=16).astype(dtype)
|
||||
dtype = getattr(xp, dtype)
|
||||
s = xp.asarray(s)
|
||||
res = symiirorder1(s, 0.5, 0.1)
|
||||
|
||||
# values from scipy 1.9.1
|
||||
exp_res = xp.asarray([
|
||||
0.14387447, 0.35166047, 0.29735238, 0.46295986, 0.45174927,
|
||||
0.19982875, 0.20355805, 0.47378628, 0.57232247, 0.51597393,
|
||||
0.25935107, 0.31438554, 0.41096728, 0.4190693 , 0.25812255,
|
||||
0.33671467], dtype=res.dtype)
|
||||
atol = {xp.float64: 1e-15, xp.float32: 1e-7}[dtype]
|
||||
xp_assert_close(res, exp_res, atol=atol)
|
||||
|
||||
I1 = xp.asarray(
|
||||
1 + 1j, dtype=xp.result_type(s, xp.complex64)
|
||||
)
|
||||
s = s * I1
|
||||
res = symiirorder1(s, 0.5, 0.1)
|
||||
assert res.dtype == (xp.complex64 if dtype == xp.float32 else xp.complex128)
|
||||
xp_assert_close(res, I1 * exp_res, atol=atol)
|
||||
|
||||
@skip_xp_backends(np_only=True,
|
||||
reason="_initial_fwd functions are private and numpy-only")
|
||||
@pytest.mark.parametrize(
|
||||
'dtype', ['float32', 'float64'])
|
||||
@pytest.mark.parametrize('precision', [-1.0, 0.7, 0.5, 0.25, 0.0075])
|
||||
def test_symiir2_initial_fwd(self, dtype, precision, xp):
|
||||
dtype = getattr(xp, dtype)
|
||||
c_precision = precision
|
||||
if precision <= 0.0 or precision > 1.0:
|
||||
if dtype in {xp.float32, xp.complex64}:
|
||||
c_precision = 1e-6
|
||||
else:
|
||||
c_precision = 1e-11
|
||||
|
||||
# Compute the initial conditions for an order-two symmetrical low-pass
# filter with r = 0.5 and omega = pi / 3 for a unit step input.
|
||||
r = xp.asarray(0.5, dtype=dtype)
|
||||
omega = xp.asarray(np.pi / 3.0, dtype=dtype)
|
||||
cs = 1 - 2 * r * xp.cos(omega) + r**2
|
||||
|
||||
# The index n for the initial condition is bound from 0 to the
|
||||
# first position where sin(omega * (n + 2)) = 0 => omega * (n + 2) = pi
|
||||
# For omega = pi / 3, the maximum initial condition occurs when
|
||||
# sqrt(3) / 2 * r**n < precision.
|
||||
# => n = log(2 * sqrt(3) / 3 * precision) / log(r)
|
||||
ub = xp.ceil(xp.log(c_precision / xp.sin(omega)) / xp.log(r))
|
||||
lb = xp.ceil(math.pi / omega) - 2
|
||||
n_exp = min(ub, lb)
|
||||
|
||||
# The forward initial condition for a filter of order two is:
|
||||
# \frac{cs}{\sin(\omega)} \sum_{n = 0}^{N - 1}
#     r^{n + 1} \sin(\omega (n + 2)) + cs
|
||||
# The closed expression for this sum is:
|
||||
# s[n] = 2 * r * np.cos(omega) -
|
||||
# r**2 - r**(n + 2) * np.sin(omega * (n + 3)) / np.sin(omega) +
|
||||
# r**(n + 3) * np.sin(omega * (n + 2)) / np.sin(omega) + cs
|
||||
fwd_initial_1 = (
|
||||
cs +
|
||||
2 * r * xp.cos(omega) -
|
||||
r**2 -
|
||||
r**(n_exp + 2) * xp.sin(omega * (n_exp + 3)) / xp.sin(omega) +
|
||||
r**(n_exp + 3) * xp.sin(omega * (n_exp + 2)) / xp.sin(omega))
|
||||
|
||||
# The second initial condition is given by
|
||||
# s[n] = 1 / np.sin(omega) * (
|
||||
# r**2 * np.sin(3 * omega) -
|
||||
# r**3 * np.sin(2 * omega) -
|
||||
# r**(n + 3) * np.sin(omega * (n + 4)) +
|
||||
# r**(n + 4) * np.sin(omega * (n + 3)))
|
||||
ub = xp.ceil(xp.log(c_precision / xp.sin(omega)) / xp.log(r))
|
||||
lb = xp.ceil(xp.pi / omega) - 3
|
||||
n_exp = min(ub, lb)
|
||||
|
||||
fwd_initial_2 = (
|
||||
cs + cs * 2 * r * xp.cos(omega) +
|
||||
(r**2 * xp.sin(3 * omega) -
|
||||
r**3 * xp.sin(2 * omega) -
|
||||
r**(n_exp + 3) * xp.sin(omega * (n_exp + 4)) +
|
||||
r**(n_exp + 4) * xp.sin(omega * (n_exp + 3))) / xp.sin(omega))
|
||||
|
||||
expected = npr(xp, fwd_initial_1, fwd_initial_2)[None, :]
|
||||
expected = xp.astype(expected, dtype)
|
||||
|
||||
n = 100
|
||||
signal = np.ones(n, dtype=dtype)
|
||||
|
||||
out = symiirorder2_ic_fwd(signal, r, omega, precision)
|
||||
xp_assert_close(out, expected, atol=4e-6, rtol=6e-7)
|
||||
|
||||
@skip_xp_backends(np_only=True,
|
||||
reason="_initial_bwd functions are private and numpy-only")
|
||||
@pytest.mark.parametrize(
|
||||
'dtype', ['float32', 'float64'])
|
||||
@pytest.mark.parametrize('precision', [-1.0, 0.7, 0.5, 0.25, 0.0075])
|
||||
def test_symiir2_initial_bwd(self, dtype, precision, xp):
|
||||
dtype = getattr(xp, dtype)
|
||||
|
||||
c_precision = precision
|
||||
if precision <= 0.0 or precision > 1.0:
|
||||
if dtype in {xp.float32, xp.complex64}:
|
||||
c_precision = 1e-6
|
||||
else:
|
||||
c_precision = 1e-11
|
||||
|
||||
r = xp.asarray(0.5, dtype=dtype)
|
||||
omega = xp.asarray(xp.pi / 3.0, dtype=dtype)
|
||||
cs = 1 - 2 * r * xp.cos(omega) + r * r
|
||||
a2 = 2 * r * xp.cos(omega)
|
||||
a3 = -r * r
|
||||
|
||||
n = 100
|
||||
signal = xp.ones(n, dtype=dtype)
|
||||
|
||||
# Compute initial forward conditions
|
||||
ic = symiirorder2_ic_fwd(signal, r, omega, precision)
|
||||
out = xp.zeros(n + 2, dtype=dtype)
|
||||
out[:2] = ic[0]
|
||||
|
||||
# Apply the forward system cs / (1 - a2 * z^-1 - a3 * z^-2)
|
||||
for i in range(2, n + 2):
|
||||
out[i] = cs * signal[i - 2] + a2 * out[i - 1] + a3 * out[i - 2]
|
||||
|
||||
# Find the backward initial conditions
|
||||
ic2 = xp.zeros(2, dtype=dtype)
|
||||
idx = xp.arange(n)
|
||||
|
||||
diff = (_compute_symiirorder2_bwd_hs(idx, cs, r * r, omega) +
|
||||
_compute_symiirorder2_bwd_hs(idx + 1, cs, r * r, omega))
|
||||
ic2_0_all = np.cumsum(diff * out[:1:-1])
|
||||
pos = xp.nonzero(diff ** 2 < c_precision)[0]
|
||||
ic2[0] = ic2_0_all[pos[0]]
|
||||
|
||||
diff = (_compute_symiirorder2_bwd_hs(idx - 1, cs, r * r, omega) +
|
||||
_compute_symiirorder2_bwd_hs(idx + 2, cs, r * r, omega))
|
||||
|
||||
ic2_1_all = xp.cumulative_sum(diff * out[:1:-1])
|
||||
pos = xp.nonzero(diff ** 2 < c_precision)[0]
|
||||
ic2[1] = ic2_1_all[pos[0]]
|
||||
|
||||
out_ic = symiirorder2_ic_bwd(out, r, omega, precision)[0]
|
||||
xp_assert_close(out_ic, ic2, atol=4e-6, rtol=6e-7)
|
||||
|
||||
@skip_xp_backends(cpu_only=True, reason="internals are numpy-only")
|
||||
@skip_xp_backends("jax.numpy", reason="item assignment in tests")
|
||||
@pytest.mark.parametrize(
|
||||
'dtype', ['float32', 'float64'])
|
||||
@pytest.mark.parametrize('precision', [-1.0, 0.7, 0.5, 0.25, 0.0075])
|
||||
def test_symiir2(self, dtype, precision, xp):
|
||||
dtype = getattr(xp, dtype)
|
||||
|
||||
r = 0.5
|
||||
omega = math.pi / 3.0
|
||||
cs = 1 - 2 * r * math.cos(omega) + r * r
|
||||
a2 = 2 * r * math.cos(omega)
|
||||
a3 = -r * r
|
||||
|
||||
n = 100
|
||||
signal = xp.ones(n, dtype=dtype)
|
||||
|
||||
# Compute initial forward conditions
|
||||
signal_np = np.asarray(signal)
|
||||
ic = symiirorder2_ic_fwd(signal_np, r, omega, precision)
|
||||
ic = xp.asarray(ic)
|
||||
out1 = xp.zeros(n + 2, dtype=dtype)
|
||||
out1[:2] = ic[0, :]
|
||||
|
||||
# Apply the forward system cs / (1 - a2 * z^-1 - a3 * z^-2)
|
||||
for i in range(2, n + 2):
|
||||
out1[i] = cs * signal[i - 2] + a2 * out1[i - 1] + a3 * out1[i - 2]
|
||||
|
||||
# Find the backward initial conditions
|
||||
ic2 = symiirorder2_ic_bwd(np.asarray(out1), r, omega, precision)[0]
|
||||
ic2 = xp.asarray(ic2)
|
||||
|
||||
# Apply the system cs / (1 - a2 * z - a3 * z^2) backwards
|
||||
exp = xp.empty(n, dtype=dtype)
|
||||
|
||||
exp[-2:] = xp.flip(ic2)
|
||||
|
||||
for i in range(n - 3, -1, -1):
|
||||
exp[i] = cs * out1[i] + a2 * exp[i + 1] + a3 * exp[i + 2]
|
||||
|
||||
out = symiirorder2(signal, r, omega, precision)
|
||||
xp_assert_close(out, exp, atol=4e-6, rtol=6e-7)
|
||||
|
||||
@skip_xp_backends(cpu_only=True, exceptions=["cupy"], reason="C internals")
|
||||
@pytest.mark.parametrize('dtyp', ['float32', 'float64'])
|
||||
def test_symiir2_values(self, dtyp, xp):
|
||||
rng = np.random.RandomState(1234)
|
||||
s = rng.uniform(size=16).astype(dtyp)
|
||||
s = xp.asarray(s)
|
||||
|
||||
# cupy returns f64 for f32 inputs
|
||||
dtype = xp.float64 if is_cupy(xp) else getattr(xp, dtyp)
|
||||
|
||||
res = symiirorder2(s, 0.1, 0.1, precision=1e-10)
|
||||
|
||||
# values from scipy 1.9.1
|
||||
exp_res = xp.asarray(
|
||||
[0.26572609, 0.53408018, 0.51032696, 0.72115829, 0.69486885,
|
||||
0.3649055 , 0.37349478, 0.74165032, 0.89718521, 0.80582483,
|
||||
0.46758053, 0.51898709, 0.65025605, 0.65394321, 0.45273595,
|
||||
0.53539183], dtype=dtype
|
||||
)
|
||||
|
||||
# The values in SciPy 1.14 agree with those in SciPy 1.9.1 to this
|
||||
# accuracy only. Implementation differences are twofold:
|
||||
# 1. boundary conditions are computed differently
|
||||
# 2. the filter itself uses sosfilt instead of a hardcoded iteration
|
||||
# The boundary conditions are tested separately (see
# test_symiir2_initial_{fwd,bwd} above), so the difference is likely
|
||||
# due to a different way roundoff errors accumulate in the filter.
|
||||
# In that respect, sosfilt is likely doing a better job.
|
||||
xp_assert_close(res, exp_res, atol=2e-6)
|
||||
|
||||
I1 = xp.asarray(1 + 1j, dtype=xp.result_type(s, xp.complex64))
|
||||
s = s * I1
|
||||
|
||||
with pytest.raises((TypeError, ValueError)):
|
||||
res = symiirorder2(s, 0.5, 0.1)
|
||||
|
||||
@skip_xp_backends(cpu_only=True, exceptions=["cupy"], reason="C internals")
|
||||
@xfail_xp_backends("cupy", reason="cupy does not accept integer arrays")
|
||||
def test_symiir1_integer_input(self, xp):
|
||||
s = xp.where(
|
||||
xp.astype(xp.arange(100) % 2, xp.bool),
|
||||
xp.asarray(-1),
|
||||
xp.asarray(1),
|
||||
)
|
||||
expected = symiirorder1(xp.astype(s, xp_default_dtype(xp)), 0.5, 0.5)
|
||||
out = symiirorder1(s, 0.5, 0.5)
|
||||
xp_assert_close(out, expected)
|
||||
|
||||
@skip_xp_backends(cpu_only=True, exceptions=["cupy"], reason="C internals")
|
||||
@xfail_xp_backends("cupy", reason="cupy does not accept integer arrays")
|
||||
def test_symiir2_integer_input(self, xp):
|
||||
s = xp.where(
|
||||
xp.astype(xp.arange(100) % 2, xp.bool),
|
||||
xp.asarray(-1),
|
||||
xp.asarray(1),
|
||||
)
|
||||
expected = symiirorder2(xp.astype(s, xp_default_dtype(xp)), 0.5, xp.pi / 3.0)
|
||||
out = symiirorder2(s, 0.5, xp.pi / 3.0)
|
||||
xp_assert_close(out, expected)
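# Illustrative sketch (not part of the original tests): the geometric-series
# argument spelled out in the comments of test_symiir1_ic above, written as a
# plain-Python helper. Only `math` is needed; the name and defaults are ours.
def _symiir1_step_ic_sketch(z1=0.85, precision=1e-11):
    # number of terms needed before z1**n drops below the requested precision
    n = int(math.ceil(math.log(precision) / math.log(z1)))
    truncated = 1 + z1 * sum(z1 ** k for k in range(n))   # direct partial sum
    closed = 1 + z1 * (1 - z1 ** n) / (1 - z1)            # closed form
    return truncated, closed                              # the two agree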
|
||||
|
|
@ -0,0 +1,322 @@
|
|||
# Code adapted from "upfirdn" python library with permission:
|
||||
#
|
||||
# Copyright (c) 2009, Motorola, Inc
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright notice,
|
||||
# this list of conditions and the following disclaimer.
|
||||
#
|
||||
# * Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in the
|
||||
# documentation and/or other materials provided with the distribution.
|
||||
#
|
||||
# * Neither the name of Motorola nor the names of its contributors may be
|
||||
# used to endorse or promote products derived from this software without
|
||||
# specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
||||
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
import numpy as np
|
||||
from itertools import product
|
||||
|
||||
from pytest import raises as assert_raises
|
||||
import pytest
|
||||
|
||||
from scipy._lib import array_api_extra as xpx
|
||||
from scipy._lib._array_api import (
|
||||
xp_assert_close, array_namespace
|
||||
)
|
||||
from scipy.signal import upfirdn, firwin
|
||||
from scipy.signal._upfirdn import _output_len, _upfirdn_modes
|
||||
from scipy.signal._upfirdn_apply import _pad_test
|
||||
|
||||
skip_xp_backends = pytest.mark.skip_xp_backends
|
||||
|
||||
|
||||
|
||||
def upfirdn_naive(x, h, up=1, down=1):
|
||||
"""Naive upfirdn processing in Python.
|
||||
|
||||
Note: the argument order (x, h) is swapped relative to `upfirdn` to
facilitate use with np.apply_along_axis.
|
||||
"""
|
||||
x = np.asarray(x)
|
||||
h = np.asarray(h)
|
||||
out = np.zeros(len(x) * up, x.dtype)
|
||||
out[::up] = x
|
||||
out = np.convolve(h, out)[::down][:_output_len(len(h), len(x), up, down)]
|
||||
return out
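# Illustrative usage sketch (not part of the test suite): on a tiny example
# the naive reference above agrees with scipy.signal.upfirdn. The input
# values and factors below are arbitrary.
def _upfirdn_naive_demo():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    h = np.array([1.0, 1.0, 1.0])
    naive = upfirdn_naive(x, h, up=2, down=3)
    fast = upfirdn(h, x, up=2, down=3)     # note the swapped (h, x) order
    return bool(np.allclose(naive, fast))  # True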
|
||||
|
||||
|
||||
class UpFIRDnCase:
|
||||
"""Test _UpFIRDn object"""
|
||||
def __init__(self, up, down, h, x_dtype):
|
||||
self.up = up
|
||||
self.down = down
|
||||
self.h = np.atleast_1d(h)
|
||||
self.x_dtype = x_dtype
|
||||
self.rng = np.random.RandomState(17)
|
||||
|
||||
def __call__(self):
|
||||
# tiny signal
|
||||
self.scrub(np.ones(1, self.x_dtype))
|
||||
# ones
|
||||
self.scrub(np.ones(10, self.x_dtype)) # ones
|
||||
# randn
|
||||
x = self.rng.randn(10).astype(self.x_dtype)
|
||||
if self.x_dtype in (np.complex64, np.complex128):
|
||||
x += 1j * self.rng.randn(10)
|
||||
self.scrub(x)
|
||||
# ramp
|
||||
self.scrub(np.arange(10).astype(self.x_dtype))
|
||||
# 3D, random
|
||||
size = (2, 3, 5)
|
||||
x = self.rng.randn(*size).astype(self.x_dtype)
|
||||
if self.x_dtype in (np.complex64, np.complex128):
|
||||
x += 1j * self.rng.randn(*size)
|
||||
for axis in range(len(size)):
|
||||
self.scrub(x, axis=axis)
|
||||
x = x[:, ::2, 1::3].T
|
||||
for axis in range(len(size)):
|
||||
self.scrub(x, axis=axis)
|
||||
|
||||
def scrub(self, x, axis=-1):
|
||||
yr = np.apply_along_axis(upfirdn_naive, axis, x,
|
||||
self.h, self.up, self.down)
|
||||
want_len = _output_len(len(self.h), x.shape[axis], self.up, self.down)
|
||||
assert yr.shape[axis] == want_len
|
||||
y = upfirdn(self.h, x, self.up, self.down, axis=axis)
|
||||
assert y.shape[axis] == want_len
|
||||
assert y.shape == yr.shape
|
||||
dtypes = (self.h.dtype, x.dtype)
|
||||
if all(d == np.complex64 for d in dtypes):
|
||||
assert y.dtype == np.complex64
|
||||
elif np.complex64 in dtypes and np.float32 in dtypes:
|
||||
assert y.dtype == np.complex64
|
||||
elif all(d == np.float32 for d in dtypes):
|
||||
assert y.dtype == np.float32
|
||||
elif np.complex128 in dtypes or np.complex64 in dtypes:
|
||||
assert y.dtype == np.complex128
|
||||
else:
|
||||
assert y.dtype == np.float64
|
||||
xp_assert_close(yr.astype(y.dtype), y)
|
||||
|
||||
|
||||
_UPFIRDN_TYPES = ("int64", "float32", "complex64", "float64", "complex128")
|
||||
|
||||
|
||||
@skip_xp_backends(cpu_only=True, reason='Cython implementation')
|
||||
class TestUpfirdn:
|
||||
|
||||
@skip_xp_backends(np_only=True, reason="enough to only test on numpy")
|
||||
def test_valid_input(self, xp):
|
||||
assert_raises(ValueError, upfirdn, [1], [1], 1, 0) # up or down < 1
|
||||
assert_raises(ValueError, upfirdn, [], [1], 1, 1) # h.ndim != 1
|
||||
assert_raises(ValueError, upfirdn, [[1]], [1], 1, 1)
|
||||
|
||||
@pytest.mark.parametrize('len_h', [1, 2, 3, 4, 5])
|
||||
@pytest.mark.parametrize('len_x', [1, 2, 3, 4, 5])
|
||||
def test_singleton(self, len_h, len_x, xp):
|
||||
# gh-9844: lengths producing expected outputs
|
||||
h = xp.zeros(len_h)
|
||||
h = xpx.at(h)[len_h // 2].set(1.) # make h a delta
|
||||
x = xp.ones(len_x)
|
||||
y = upfirdn(h, x, 1, 1)
|
||||
want = xpx.pad(x, (len_h // 2, (len_h - 1) // 2), 'constant', xp=xp)
|
||||
xp_assert_close(y, want)
|
||||
|
||||
def test_shift_x(self, xp):
|
||||
# gh-9844: shifted x can change values?
|
||||
y = upfirdn(xp.asarray([1, 1]), xp.asarray([1.]), 1, 1)
|
||||
xp_assert_close(
|
||||
y, xp.asarray([1.0, 1.0], dtype=xp.float64) # was [0, 1] in the issue
|
||||
)
|
||||
y = upfirdn(xp.asarray([1, 1]), xp.asarray([0., 1.]), 1, 1)
|
||||
xp_assert_close(y, xp.asarray([0.0, 1.0, 1.0], dtype=xp.float64))
|
||||
|
||||
# A bunch of lengths/factors chosen because they exposed differences
# between the "old" and new ways of computing the output length; the
# `expected` values below were obtained from MATLAB.
|
||||
@pytest.mark.parametrize('len_h, len_x, up, down, expected', [
|
||||
(2, 2, 5, 2, [1, 0, 0, 0]),
|
||||
(2, 3, 6, 3, [1, 0, 1, 0, 1]),
|
||||
(2, 4, 4, 3, [1, 0, 0, 0, 1]),
|
||||
(3, 2, 6, 2, [1, 0, 0, 1, 0]),
|
||||
(4, 11, 3, 5, [1, 0, 0, 1, 0, 0, 1]),
|
||||
])
|
||||
def test_length_factors(self, len_h, len_x, up, down, expected, xp):
|
||||
# gh-9844: weird factors
|
||||
h = xp.zeros(len_h)
|
||||
h = xpx.at(h)[0].set(1.)
|
||||
x = xp.ones(len_x, dtype=xp.float64)
|
||||
y = upfirdn(h, x, up, down)
|
||||
expected = xp.asarray(expected, dtype=xp.float64)
|
||||
xp_assert_close(y, expected)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'dtype', ["int64", "float32", "complex64", "float64", "complex128"]
|
||||
)
|
||||
@pytest.mark.parametrize('down, want_len', [ # lengths from MATLAB
|
||||
(2, 5015),
|
||||
(11, 912),
|
||||
(79, 127),
|
||||
])
|
||||
def test_vs_convolve(self, down, want_len, dtype, xp):
|
||||
# Check that up=1 gives the same answer as convolve + slicing
|
||||
random_state = np.random.RandomState(17)
|
||||
size = 10000
|
||||
|
||||
np_dtype = getattr(np, dtype)
|
||||
x = random_state.randn(size).astype(np_dtype)
|
||||
if np_dtype in (np.complex64, np.complex128):
|
||||
x += 1j * random_state.randn(size)
|
||||
|
||||
dtype = getattr(xp, dtype)
|
||||
x = xp.asarray(x, dtype=dtype)
|
||||
|
||||
h = xp.asarray(firwin(31, 1. / down, window='hamming'))
|
||||
yl = xp.asarray(upfirdn_naive(x, h, 1, down))
|
||||
y = upfirdn(h, x, up=1, down=down)
|
||||
assert y.shape == (want_len,)
|
||||
assert yl.shape[0] == y.shape[0]
|
||||
xp_assert_close(yl, y, atol=1e-7, rtol=1e-7)
|
||||
|
||||
@skip_xp_backends(np_only=True, reason="apply_along_axis")
|
||||
@pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES)
|
||||
@pytest.mark.parametrize('h', (1., 1j))
|
||||
@pytest.mark.parametrize('up, down', [(1, 1), (2, 2), (3, 2), (2, 3)])
|
||||
def test_vs_naive_delta(self, x_dtype, h, up, down, xp):
|
||||
UpFIRDnCase(up, down, h, x_dtype)()
|
||||
|
||||
@skip_xp_backends(np_only=True, reason="apply_along_axis")
|
||||
@pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES)
|
||||
@pytest.mark.parametrize('h_dtype', _UPFIRDN_TYPES)
|
||||
@pytest.mark.parametrize('p_max, q_max',
|
||||
list(product((10, 100), (10, 100))))
|
||||
def test_vs_naive(self, x_dtype, h_dtype, p_max, q_max, xp):
|
||||
tests = self._random_factors(p_max, q_max, h_dtype, x_dtype)
|
||||
for test in tests:
|
||||
test()
|
||||
|
||||
def _random_factors(self, p_max, q_max, h_dtype, x_dtype):
|
||||
n_rep = 3
|
||||
longest_h = 25
|
||||
random_state = np.random.RandomState(17)
|
||||
tests = []
|
||||
|
||||
for _ in range(n_rep):
|
||||
# Randomize the up/down factors somewhat
|
||||
p_add = q_max if p_max > q_max else 1
|
||||
q_add = p_max if q_max > p_max else 1
|
||||
p = random_state.randint(p_max) + p_add
|
||||
q = random_state.randint(q_max) + q_add
|
||||
|
||||
# Generate random FIR coefficients
|
||||
len_h = random_state.randint(longest_h) + 1
|
||||
h = np.atleast_1d(random_state.randint(len_h))
|
||||
h = h.astype(h_dtype)
|
||||
if h_dtype is complex:
|
||||
h += 1j * random_state.randint(len_h)
|
||||
|
||||
tests.append(UpFIRDnCase(p, q, h, x_dtype))
|
||||
|
||||
return tests
|
||||
|
||||
@pytest.mark.parametrize('mode', _upfirdn_modes)
|
||||
def test_extensions(self, mode, xp):
|
||||
"""Test vs. manually computed results for modes not in numpy's pad."""
|
||||
x = np.asarray([1, 2, 3, 1], dtype=np.float64)
|
||||
npre, npost = 6, 6
|
||||
y = _pad_test(x, npre=npre, npost=npost, mode=mode)
|
||||
|
||||
x = xp.asarray(x)
|
||||
y = xp.asarray(y)
|
||||
if mode == 'antisymmetric':
|
||||
y_expected = xp.asarray(
|
||||
[3.0, 1, -1, -3, -2, -1, 1, 2, 3, 1, -1, -3, -2, -1, 1, 2])
|
||||
elif mode == 'antireflect':
|
||||
y_expected = xp.asarray(
|
||||
[1.0, 2, 3, 1, -1, 0, 1, 2, 3, 1, -1, 0, 1, 2, 3, 1])
|
||||
elif mode == 'smooth':
|
||||
y_expected = xp.asarray(
|
||||
[-5.0, -4, -3, -2, -1, 0, 1, 2, 3, 1, -1, -3, -5, -7, -9, -11])
|
||||
elif mode == "line":
|
||||
lin_slope = (x[-1] - x[0]) / (x.shape[0] - 1)
|
||||
left = x[0] + xp.arange(-npre, 0, 1, dtype=xp.float64) * lin_slope
|
||||
right = x[-1] + xp.arange(1, npost + 1, dtype=xp.float64) * lin_slope
|
||||
concat = array_namespace(left).concat
|
||||
y_expected = concat((left, x, right))
|
||||
else:
|
||||
y_expected = np.pad(np.asarray(x), (npre, npost), mode=mode)
|
||||
y_expected = xp.asarray(y_expected)
|
||||
|
||||
y_expected = xp.asarray(y_expected, dtype=xp.float64)
|
||||
xp_assert_close(y, y_expected)
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'size, h_len, mode, dtype',
|
||||
product(
|
||||
[8],
|
||||
[4, 5, 26], # include cases with h_len > 2*size
|
||||
_upfirdn_modes,
|
||||
["float32", "float64", "complex64", "complex128"],
|
||||
)
|
||||
)
|
||||
def test_modes(self, size, h_len, mode, dtype, xp):
|
||||
dtype_np = getattr(np, dtype)
|
||||
dtype_xp = getattr(xp, dtype)
|
||||
|
||||
random_state = np.random.RandomState(5)
|
||||
x = random_state.randn(size).astype(dtype_np)
|
||||
if dtype in ("complex64", "complex128"):
|
||||
x += 1j * random_state.randn(size)
|
||||
h = np.arange(1, 1 + h_len, dtype=x.real.dtype)
|
||||
|
||||
x = xp.asarray(x, dtype=dtype_xp)
|
||||
h = xp.asarray(h)
|
||||
|
||||
y = upfirdn(h, x, up=1, down=1, mode=mode)
|
||||
# expected result: pad the input, filter with zero padding, then crop
|
||||
npad = h_len - 1
|
||||
if mode in ['antisymmetric', 'antireflect', 'smooth', 'line']:
|
||||
# use _pad_test test function for modes not supported by np.pad.
|
||||
xpad = _pad_test(np.asarray(x), npre=npad, npost=npad, mode=mode)
|
||||
else:
|
||||
xpad = np.pad(np.asarray(x), npad, mode=mode)
|
||||
|
||||
xpad = xp.asarray(xpad)
|
||||
ypad = upfirdn(h, xpad, up=1, down=1, mode='constant')
|
||||
y_expected = ypad[npad:-npad]
|
||||
|
||||
atol = rtol = xp.finfo(dtype_xp).eps * 1e2
|
||||
xp_assert_close(y, y_expected, atol=atol, rtol=rtol)
|
||||
|
||||
|
||||
@skip_xp_backends(cpu_only=True, reason='Cython implementation')
|
||||
def test_output_len_long_input(xp):
|
||||
# Regression test for gh-17375. On Windows, a large enough input
|
||||
# that should have been well within the capabilities of 64 bit integers
|
||||
# would result in a 32 bit overflow because of a bug in Cython 0.29.32.
|
||||
len_h = 1001
|
||||
in_len = 10**8
|
||||
up = 320
|
||||
down = 441
|
||||
out_len = _output_len(len_h, in_len, up, down)
|
||||
# The expected value was computed "by hand" from the formula
|
||||
# (((in_len - 1) * up + len_h) - 1) // down + 1
|
||||
assert out_len == 72562360
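# Illustrative cross-check (not part of the test): the same formula evaluated
# with plain Python integers, which cannot overflow, reproduces the expected
# value used in the assertion above. The helper name is ours.
def _output_len_by_hand(len_h=1001, in_len=10**8, up=320, down=441):
    return (((in_len - 1) * up + len_h) - 1) // down + 1   # == 72562360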
|
||||
|
|
@ -0,0 +1,400 @@
|
|||
import numpy as np
|
||||
from pytest import raises as assert_raises
|
||||
from scipy._lib._array_api import (
|
||||
assert_almost_equal, xp_assert_equal, xp_assert_close
|
||||
)
|
||||
|
||||
import scipy.signal._waveforms as waveforms
|
||||
|
||||
|
||||
# These chirp_* functions compute the instantaneous frequency of the
# signals returned by chirp().
|
||||
|
||||
def chirp_linear(t, f0, f1, t1):
|
||||
f = f0 + (f1 - f0) * t / t1
|
||||
return f
|
||||
|
||||
|
||||
def chirp_quadratic(t, f0, f1, t1, vertex_zero=True):
|
||||
if vertex_zero:
|
||||
f = f0 + (f1 - f0) * t**2 / t1**2
|
||||
else:
|
||||
f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2
|
||||
return f
|
||||
|
||||
|
||||
def chirp_geometric(t, f0, f1, t1):
|
||||
f = f0 * (f1/f0)**(t/t1)
|
||||
return f
|
||||
|
||||
|
||||
def chirp_hyperbolic(t, f0, f1, t1):
|
||||
f = f0*f1*t1 / ((f0 - f1)*t + f1*t1)
|
||||
return f
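# Illustrative sketch (not part of the tests): each of the chirp_* helpers
# above starts at f0 and ends at f1; a quick endpoint check for the
# hyperbolic case with arbitrary example values. The name is ours.
def _chirp_hyperbolic_endpoints(f0=10.0, f1=1.0, t1=1.0):
    start = chirp_hyperbolic(0.0, f0, f1, t1)   # == f0
    end = chirp_hyperbolic(t1, f0, f1, t1)      # == f1
    return start, end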
|
||||
|
||||
|
||||
def compute_frequency(t, theta):
|
||||
"""
|
||||
Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t).
|
||||
"""
|
||||
# Assume theta and t are 1-D NumPy arrays.
|
||||
# Assume that t is uniformly spaced.
|
||||
dt = t[1] - t[0]
|
||||
f = np.diff(theta)/(2*np.pi) / dt
|
||||
tf = 0.5*(t[1:] + t[:-1])
|
||||
return tf, f
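# Illustrative sketch (not part of the tests): compute_frequency recovers the
# instantaneous frequency of a linear chirp from its phase, mirroring
# test_linear_freq_01 below. Example values are ours.
def _compute_frequency_demo(f0=1.0, f1=2.0, t1=1.0):
    t = np.linspace(0, t1, 10000)
    phase = waveforms._chirp_phase(t, f0, t1, f1, 'linear')
    tf, f = compute_frequency(t, phase)
    return float(np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))))  # ~0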
|
||||
|
||||
|
||||
class TestChirp:
|
||||
|
||||
def test_linear_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear')
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_linear_freq_01(self):
|
||||
method = 'linear'
|
||||
f0 = 1.0
|
||||
f1 = 2.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 100)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_linear_freq_02(self):
|
||||
method = 'linear'
|
||||
f0 = 200.0
|
||||
f1 = 100.0
|
||||
t1 = 10.0
|
||||
t = np.linspace(0, t1, 100)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_linear_complex_power(self):
|
||||
method = 'linear'
|
||||
f0 = 1.0
|
||||
f1 = 2.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 100)
|
||||
w_real = waveforms.chirp(t, f0, t1, f1, method, complex=False)
|
||||
w_complex = waveforms.chirp(t, f0, t1, f1, method, complex=True)
|
||||
w_pwr_r = np.var(w_real)
|
||||
w_pwr_c = np.var(w_complex)
|
||||
|
||||
# Make sure the power of the real part is not affected by the
# conversion to a complex signal
err = w_pwr_r - np.real(w_pwr_c)

assert err < 1e-6
|
||||
|
||||
def test_linear_complex_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=-10.0, f1=1.0, t1=1.0, method='linear',
|
||||
complex=True)
|
||||
xp_assert_close(w, 1.0+0.0j) # dtype must match
|
||||
|
||||
def test_quadratic_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic')
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_quadratic_at_zero2(self):
|
||||
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic',
|
||||
vertex_zero=False)
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_quadratic_complex_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=-1.0, f1=2.0, t1=1.0, method='quadratic',
|
||||
complex=True)
|
||||
xp_assert_close(w, 1.0+0j)
|
||||
|
||||
def test_quadratic_freq_01(self):
|
||||
method = 'quadratic'
|
||||
f0 = 1.0
|
||||
f1 = 2.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 2000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_quadratic_freq_02(self):
|
||||
method = 'quadratic'
|
||||
f0 = 20.0
|
||||
f1 = 10.0
|
||||
t1 = 10.0
|
||||
t = np.linspace(0, t1, 2000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_logarithmic_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic')
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_logarithmic_freq_01(self):
|
||||
method = 'logarithmic'
|
||||
f0 = 1.0
|
||||
f1 = 2.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 10000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_logarithmic_freq_02(self):
|
||||
method = 'logarithmic'
|
||||
f0 = 200.0
|
||||
f1 = 100.0
|
||||
t1 = 10.0
|
||||
t = np.linspace(0, t1, 10000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_logarithmic_freq_03(self):
|
||||
method = 'logarithmic'
|
||||
f0 = 100.0
|
||||
f1 = 100.0
|
||||
t1 = 10.0
|
||||
t = np.linspace(0, t1, 10000)
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_hyperbolic_at_zero(self):
|
||||
w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic')
|
||||
assert_almost_equal(w, 1.0)
|
||||
|
||||
def test_hyperbolic_freq_01(self):
|
||||
method = 'hyperbolic'
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 10000)
|
||||
# f0 f1
|
||||
cases = [[10.0, 1.0],
|
||||
[1.0, 10.0],
|
||||
[-10.0, -1.0],
|
||||
[-1.0, -10.0]]
|
||||
for f0, f1 in cases:
|
||||
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = chirp_hyperbolic(tf, f0, f1, t1)
|
||||
xp_assert_close(f, expected, atol=1e-7)
|
||||
|
||||
def test_hyperbolic_zero_freq(self):
|
||||
# f0=0 or f1=0 must raise a ValueError.
|
||||
method = 'hyperbolic'
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 5)
|
||||
assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method)
|
||||
assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method)
|
||||
|
||||
def test_unknown_method(self):
|
||||
method = "foo"
|
||||
f0 = 10.0
|
||||
f1 = 20.0
|
||||
t1 = 1.0
|
||||
t = np.linspace(0, t1, 10)
|
||||
assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method)
|
||||
|
||||
def test_integer_t1(self):
|
||||
f0 = 10.0
|
||||
f1 = 20.0
|
||||
t = np.linspace(-1, 1, 11)
|
||||
t1 = 3.0
|
||||
float_result = waveforms.chirp(t, f0, t1, f1)
|
||||
t1 = 3
|
||||
int_result = waveforms.chirp(t, f0, t1, f1)
|
||||
err_msg = "Integer input 't1=3' gives wrong result"
|
||||
xp_assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_f0(self):
|
||||
f1 = 20.0
|
||||
t1 = 3.0
|
||||
t = np.linspace(-1, 1, 11)
|
||||
f0 = 10.0
|
||||
float_result = waveforms.chirp(t, f0, t1, f1)
|
||||
f0 = 10
|
||||
int_result = waveforms.chirp(t, f0, t1, f1)
|
||||
err_msg = "Integer input 'f0=10' gives wrong result"
|
||||
xp_assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_f1(self):
|
||||
f0 = 10.0
|
||||
t1 = 3.0
|
||||
t = np.linspace(-1, 1, 11)
|
||||
f1 = 20.0
|
||||
float_result = waveforms.chirp(t, f0, t1, f1)
|
||||
f1 = 20
|
||||
int_result = waveforms.chirp(t, f0, t1, f1)
|
||||
err_msg = "Integer input 'f1=20' gives wrong result"
|
||||
xp_assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_all(self):
|
||||
f0 = 10
|
||||
t1 = 3
|
||||
f1 = 20
|
||||
t = np.linspace(-1, 1, 11)
|
||||
float_result = waveforms.chirp(t, float(f0), float(t1), float(f1))
|
||||
int_result = waveforms.chirp(t, f0, t1, f1)
|
||||
err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result"
|
||||
xp_assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
|
||||
class TestSweepPoly:
|
||||
|
||||
def test_sweep_poly_quad1(self):
|
||||
p = np.poly1d([1.0, 0.0, 1.0])
|
||||
t = np.linspace(0, 3.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_sweep_poly_const(self):
|
||||
p = np.poly1d(2.0)
|
||||
t = np.linspace(0, 3.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_sweep_poly_linear(self):
|
||||
p = np.poly1d([-1.0, 10.0])
|
||||
t = np.linspace(0, 3.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_sweep_poly_quad2(self):
|
||||
p = np.poly1d([1.0, 0.0, -2.0])
|
||||
t = np.linspace(0, 3.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_sweep_poly_cubic(self):
|
||||
p = np.poly1d([2.0, 1.0, 0.0, -2.0])
|
||||
t = np.linspace(0, 2.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = p(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_sweep_poly_cubic2(self):
|
||||
"""Use an array of coefficients instead of a poly1d."""
|
||||
p = np.array([2.0, 1.0, 0.0, -2.0])
|
||||
t = np.linspace(0, 2.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = np.poly1d(p)(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert abserr < 1e-6
|
||||
|
||||
def test_sweep_poly_cubic3(self):
|
||||
"""Use a list of coefficients instead of a poly1d."""
|
||||
p = [2.0, 1.0, 0.0, -2.0]
|
||||
t = np.linspace(0, 2.0, 10000)
|
||||
phase = waveforms._sweep_poly_phase(t, p)
|
||||
tf, f = compute_frequency(t, phase)
|
||||
expected = np.poly1d(p)(tf)
|
||||
abserr = np.max(np.abs(f - expected))
|
||||
assert abserr < 1e-6
|
||||
|
||||
|
||||
class TestGaussPulse:
|
||||
|
||||
def test_integer_fc(self):
|
||||
float_result = waveforms.gausspulse('cutoff', fc=1000.0)
|
||||
int_result = waveforms.gausspulse('cutoff', fc=1000)
|
||||
err_msg = "Integer input 'fc=1000' gives wrong result"
|
||||
xp_assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_bw(self):
|
||||
float_result = waveforms.gausspulse('cutoff', bw=1.0)
|
||||
int_result = waveforms.gausspulse('cutoff', bw=1)
|
||||
err_msg = "Integer input 'bw=1' gives wrong result"
|
||||
xp_assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_bwr(self):
|
||||
float_result = waveforms.gausspulse('cutoff', bwr=-6.0)
|
||||
int_result = waveforms.gausspulse('cutoff', bwr=-6)
|
||||
err_msg = "Integer input 'bwr=-6' gives wrong result"
|
||||
xp_assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
def test_integer_tpr(self):
|
||||
float_result = waveforms.gausspulse('cutoff', tpr=-60.0)
|
||||
int_result = waveforms.gausspulse('cutoff', tpr=-60)
|
||||
err_msg = "Integer input 'tpr=-60' gives wrong result"
|
||||
xp_assert_equal(int_result, float_result, err_msg=err_msg)
|
||||
|
||||
|
||||
class TestUnitImpulse:
|
||||
|
||||
def test_no_index(self):
|
||||
xp_assert_equal(waveforms.unit_impulse(7),
|
||||
np.asarray([1.0, 0, 0, 0, 0, 0, 0]))
|
||||
xp_assert_equal(waveforms.unit_impulse((3, 3)),
|
||||
np.asarray([[1.0, 0, 0], [0, 0, 0], [0, 0, 0]]))
|
||||
|
||||
def test_index(self):
|
||||
xp_assert_equal(waveforms.unit_impulse(10, 3),
|
||||
np.asarray([0.0, 0, 0, 1, 0, 0, 0, 0, 0, 0]))
|
||||
xp_assert_equal(waveforms.unit_impulse((3, 3), (1, 1)),
|
||||
np.asarray([[0.0, 0, 0], [0, 1, 0], [0, 0, 0]]))
|
||||
|
||||
# Broadcasting
|
||||
imp = waveforms.unit_impulse((4, 4), 2)
|
||||
xp_assert_equal(imp, np.asarray([[0.0, 0, 0, 0],
|
||||
[0.0, 0, 0, 0],
|
||||
[0.0, 0, 1, 0],
|
||||
[0.0, 0, 0, 0]]))
|
||||
|
||||
def test_mid(self):
|
||||
xp_assert_equal(waveforms.unit_impulse((3, 3), 'mid'),
|
||||
np.asarray([[0.0, 0, 0], [0, 1, 0], [0, 0, 0]]))
|
||||
xp_assert_equal(waveforms.unit_impulse(9, 'mid'),
|
||||
np.asarray([0.0, 0, 0, 0, 1, 0, 0, 0, 0]))
|
||||
|
||||
def test_dtype(self):
|
||||
imp = waveforms.unit_impulse(7)
|
||||
assert np.issubdtype(imp.dtype, np.floating)
|
||||
|
||||
imp = waveforms.unit_impulse(5, 3, dtype=int)
|
||||
assert np.issubdtype(imp.dtype, np.integer)
|
||||
|
||||
imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex)
|
||||
assert np.issubdtype(imp.dtype, np.complexfloating)
|
||||
|
||||
|
||||
class TestSawtoothWaveform:
|
||||
def test_dtype(self):
|
||||
waveform = waveforms.sawtooth(
|
||||
np.array(1, dtype=np.float32), width=np.float32(1)
|
||||
)
|
||||
assert waveform.dtype == np.float64
|
||||
|
||||
waveform = waveforms.sawtooth(1)
|
||||
assert waveform.dtype == np.float64
|
||||
|
||||
|
||||
class TestSquareWaveform:
|
||||
def test_dtype(self):
|
||||
waveform = waveforms.square(np.array(1, dtype=np.float32), duty=np.float32(0.5))
|
||||
assert waveform.dtype == np.float64
|
||||
|
||||
waveform = waveforms.square(1)
|
||||
assert waveform.dtype == np.float64
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
import numpy as np
|
||||
from numpy.testing import assert_array_equal, assert_array_almost_equal
|
||||
|
||||
import scipy.signal._wavelets as wavelets
|
||||
|
||||
|
||||
class TestWavelets:
|
||||
def test_ricker(self):
|
||||
w = wavelets._ricker(1.0, 1)
|
||||
expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25))
|
||||
assert_array_equal(w, expected)
|
||||
|
||||
lengths = [5, 11, 15, 51, 101]
|
||||
for length in lengths:
|
||||
w = wavelets._ricker(length, 1.0)
|
||||
assert len(w) == length
|
||||
max_loc = np.argmax(w)
|
||||
assert max_loc == (length // 2)
|
||||
|
||||
points = 100
|
||||
w = wavelets._ricker(points, 2.0)
|
||||
half_vec = np.arange(0, points // 2)
|
||||
# Wavelet should be symmetric
|
||||
assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])
|
||||
|
||||
# Check zeros
|
||||
aas = [5, 10, 15, 20, 30]
|
||||
points = 99
|
||||
for a in aas:
|
||||
w = wavelets._ricker(points, a)
|
||||
vec = np.arange(0, points) - (points - 1.0) / 2
|
||||
exp_zero1 = np.argmin(np.abs(vec - a))
|
||||
exp_zero2 = np.argmin(np.abs(vec + a))
|
||||
assert_array_almost_equal(w[exp_zero1], 0)
|
||||
assert_array_almost_equal(w[exp_zero2], 0)
|
||||
|
||||
def test_cwt(self):
|
||||
widths = [1.0]
|
||||
def delta_wavelet(s, t):
|
||||
return np.array([1])
|
||||
len_data = 100
|
||||
test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0)
|
||||
|
||||
# Test delta function input gives same data as output
|
||||
cwt_dat = wavelets._cwt(test_data, delta_wavelet, widths)
|
||||
assert cwt_dat.shape == (len(widths), len_data)
|
||||
assert_array_almost_equal(test_data, cwt_dat.flatten())
|
||||
|
||||
# Check proper shape on output
|
||||
widths = [1, 3, 4, 5, 10]
|
||||
cwt_dat = wavelets._cwt(test_data, wavelets._ricker, widths)
|
||||
assert cwt_dat.shape == (len(widths), len_data)
|
||||
|
||||
widths = [len_data * 10]
|
||||
# Note: this wavelet isn't defined quite right, but is fine for this test
|
||||
def flat_wavelet(l, w):
|
||||
return np.full(w, 1 / w)
|
||||
cwt_dat = wavelets._cwt(test_data, flat_wavelet, widths)
|
||||
assert_array_almost_equal(cwt_dat, np.mean(test_data))
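# Illustrative sketch (not part of the tests): the peak value checked in
# test_ricker above is the amplitude 2 / (sqrt(3 * a) * pi**0.25) of the
# Ricker ("Mexican hat") wavelet, reached at the centre sample. The helper
# name and defaults are ours.
def _ricker_peak_sketch(a=1.0, points=101):
    w = wavelets._ricker(points, a)
    peak = w[points // 2]
    analytic = 2 / (np.sqrt(3 * a) * np.pi ** 0.25)
    return peak, analytic   # equal up to floating-point roundoff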
|
||||
File diff suppressed because one or more lines are too long