book follow-up

Tykayn 2025-08-30 18:14:14 +02:00 committed by tykayn
parent b4b4398bb0
commit 3a7a3849ae
12242 changed files with 2564461 additions and 6914 deletions

View file

@@ -0,0 +1,148 @@
"""
Sparse linear algebra (:mod:`scipy.sparse.linalg`)
==================================================
.. currentmodule:: scipy.sparse.linalg
Abstract linear operators
-------------------------
.. autosummary::
:toctree: generated/
LinearOperator -- abstract representation of a linear operator
aslinearoperator -- convert an object to an abstract linear operator
Matrix Operations
-----------------
.. autosummary::
:toctree: generated/
inv -- compute the sparse matrix inverse
expm -- compute the sparse matrix exponential
expm_multiply -- compute the product of a matrix exponential and a matrix
matrix_power -- compute the matrix power by raising a matrix to an exponent
Matrix norms
------------
.. autosummary::
:toctree: generated/
norm -- Norm of a sparse matrix
onenormest -- Estimate the 1-norm of a sparse matrix
Solving linear problems
-----------------------
Direct methods for linear equation systems:
.. autosummary::
:toctree: generated/
spsolve -- Solve the sparse linear system Ax=b
spsolve_triangular -- Solve sparse linear system Ax=b for a triangular A.
is_sptriangular -- Check if sparse A is triangular.
spbandwidth -- Find the bandwidth of a sparse matrix.
factorized -- Pre-factorize matrix to a function solving a linear system
MatrixRankWarning -- Warning on exactly singular matrices
use_solver -- Select direct solver to use
Iterative methods for linear equation systems:
.. autosummary::
:toctree: generated/
bicg -- Use BIConjugate Gradient iteration to solve Ax = b
bicgstab -- Use BIConjugate Gradient STABilized iteration to solve Ax = b
cg -- Use Conjugate Gradient iteration to solve Ax = b
cgs -- Use Conjugate Gradient Squared iteration to solve Ax = b
gmres -- Use Generalized Minimal RESidual iteration to solve Ax = b
lgmres -- Solve a matrix equation using the LGMRES algorithm
minres -- Use MINimum RESidual iteration to solve Ax = b
qmr -- Use Quasi-Minimal Residual iteration to solve Ax = b
gcrotmk -- Solve a matrix equation using the GCROT(m,k) algorithm
tfqmr -- Use Transpose-Free Quasi-Minimal Residual iteration to solve Ax = b
Iterative methods for least-squares problems:
.. autosummary::
:toctree: generated/
lsqr -- Find the least-squares solution to a sparse linear equation system
lsmr -- Find the least-squares solution to a sparse linear equation system
Matrix factorizations
---------------------
Eigenvalue problems:
.. autosummary::
:toctree: generated/
eigs -- Find k eigenvalues and eigenvectors of the square matrix A
eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix
lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning
Singular value problems:
.. autosummary::
:toctree: generated/
svds -- Compute k singular values/vectors for a sparse matrix
The `svds` function supports the following solvers:
.. toctree::
sparse.linalg.svds-arpack
sparse.linalg.svds-lobpcg
sparse.linalg.svds-propack
Complete or incomplete LU factorizations
.. autosummary::
:toctree: generated/
splu -- Compute an LU decomposition for a sparse matrix
spilu -- Compute an incomplete LU decomposition for a sparse matrix
SuperLU -- Object representing an LU factorization
Sparse arrays with structure
----------------------------
.. autosummary::
:toctree: generated/
LaplacianNd -- Laplacian on a uniform rectangular grid in ``N`` dimensions
Exceptions
----------
.. autosummary::
:toctree: generated/
ArpackNoConvergence
ArpackError
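A minimal usage sketch tying the pieces above together: wrap a matrix-free
operator in `LinearOperator` and pass it to an iterative solver such as `cg`
(assuming a symmetric positive definite operator):
>>> import numpy as np
>>> from scipy.sparse.linalg import LinearOperator, cg
>>> def mv(v):
...     return np.array([2.0 * v[0], 3.0 * v[1]])
>>> A = LinearOperator((2, 2), matvec=mv)
>>> x, info = cg(A, np.array([2.0, 3.0]))
>>> info
0
>>> np.allclose(x, [1.0, 1.0])
True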
"""
from ._isolve import *
from ._dsolve import *
from ._interface import *
from ._eigen import *
from ._matfuncs import *
from ._onenormest import *
from ._norm import *
from ._expm_multiply import *
from ._special_sparse_arrays import *
# Deprecated namespaces, to be removed in v2.0.0
from . import isolve, dsolve, interface, eigen, matfuncs
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester

View file

@@ -0,0 +1,71 @@
"""
Linear Solvers
==============
The default solver is SuperLU (included in the scipy distribution),
which can solve real or complex linear systems in both single and
double precisions. It is automatically replaced by UMFPACK, if
available. Note that UMFPACK works in double precision only, so
switch it off by::
>>> from scipy.sparse.linalg import spsolve, use_solver
>>> use_solver(useUmfpack=False)
to solve in single precision. See also the `use_solver` documentation.
Example session::
>>> from scipy.sparse import csc_array, dia_array
>>> from numpy import array
>>>
>>> print("Inverting a sparse linear system:")
>>> print("The sparse matrix (constructed from diagonals):")
>>> a = dia_array(([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1]), shape=(5, 5))
>>> b = array([1, 2, 3, 4, 5])
>>> print("Solve: single precision complex:")
>>> use_solver(useUmfpack=False)
>>> a = a.astype('F')
>>> x = spsolve(a, b)
>>> print(x)
>>> print("Error: ", a@x-b)
>>>
>>> print("Solve: double precision complex:")
>>> use_solver(useUmfpack=True)
>>> a = a.astype('D')
>>> x = spsolve(a, b)
>>> print(x)
>>> print("Error: ", a@x-b)
>>>
>>> print("Solve: double precision:")
>>> a = a.astype('d')
>>> x = spsolve(a, b)
>>> print(x)
>>> print("Error: ", a@x-b)
>>>
>>> print("Solve: single precision:")
>>> use_solver(useUmfpack=False)
>>> a = a.astype('f')
>>> x = spsolve(a, b.astype('f'))
>>> print(x)
>>> print("Error: ", a@x-b)
"""
#import umfpack
#__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) )
#del umfpack
from .linsolve import *
from ._superlu import SuperLU
from . import _add_newdocs
from . import linsolve
__all__ = [
'MatrixRankWarning', 'SuperLU', 'factorized',
'spilu', 'splu', 'spsolve', 'is_sptriangular',
'spsolve_triangular', 'use_solver', 'spbandwidth',
]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester

View file

@@ -0,0 +1,147 @@
from numpy.lib import add_newdoc
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU',
"""
LU factorization of a sparse matrix.
Factorization is represented as::
Pr @ A @ Pc = L @ U
To construct these `SuperLU` objects, call the `splu` and `spilu`
functions.
Attributes
----------
shape
nnz
perm_c
perm_r
L
U
Methods
-------
solve
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
The LU decomposition can be used to solve matrix equations. Consider:
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import splu
>>> A = csc_array([[1,2,0,4], [1,0,0,1], [1,0,2,1], [2,2,1,0.]])
This can be solved for a given right-hand side:
>>> lu = splu(A)
>>> b = np.array([1, 2, 3, 4])
>>> x = lu.solve(b)
>>> A.dot(x)
array([ 1., 2., 3., 4.])
The ``lu`` object also contains an explicit representation of the
decomposition. The permutations are represented as mappings of
indices:
>>> lu.perm_r
array([2, 1, 3, 0], dtype=int32) # may vary
>>> lu.perm_c
array([0, 1, 3, 2], dtype=int32) # may vary
The L and U factors are sparse matrices in CSC format:
>>> lu.L.toarray()
array([[ 1. , 0. , 0. , 0. ], # may vary
[ 0.5, 1. , 0. , 0. ],
[ 0.5, -1. , 1. , 0. ],
[ 0.5, 1. , 0. , 1. ]])
>>> lu.U.toarray()
array([[ 2. , 2. , 0. , 1. ], # may vary
[ 0. , -1. , 1. , -0.5],
[ 0. , 0. , 5. , -1. ],
[ 0. , 0. , 0. , 2. ]])
The permutation matrices can be constructed:
>>> Pr = csc_array((np.ones(4), (lu.perm_r, np.arange(4))))
>>> Pc = csc_array((np.ones(4), (np.arange(4), lu.perm_c)))
We can reassemble the original matrix:
>>> (Pr.T @ (lu.L @ lu.U) @ Pc.T).toarray()
array([[ 1., 2., 0., 4.],
[ 1., 0., 0., 1.],
[ 1., 0., 2., 1.],
[ 2., 2., 1., 0.]])
""")
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('solve',
"""
solve(rhs[, trans])
Solves a linear system of equations with one or several right-hand sides.
Parameters
----------
rhs : ndarray, shape (n,) or (n, k)
Right-hand side(s) of the equation
trans : {'N', 'T', 'H'}, optional
Type of system to solve::
'N': A @ x == rhs (default)
'T': A^T @ x == rhs
'H': A^H @ x == rhs
i.e., normal, transposed, and Hermitian conjugate.
Returns
-------
x : ndarray, shape ``rhs.shape``
Solution vector(s)
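Examples
--------
A minimal sketch of solving the transposed system via ``trans='T'``:
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import splu
>>> A = csc_array([[1., 0.], [1., 2.]])
>>> lu = splu(A)
>>> x = lu.solve(np.array([1., 3.]), 'T')
>>> np.allclose(A.T @ x, [1., 3.])
True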
"""))
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('L',
"""
Lower triangular factor with unit diagonal as a
`scipy.sparse.csc_array`.
.. versionadded:: 0.14.0
"""))
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('U',
"""
Upper triangular factor as a `scipy.sparse.csc_array`.
.. versionadded:: 0.14.0
"""))
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('shape',
"""
Shape of the original matrix as a tuple of ints.
"""))
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('nnz',
"""
Number of nonzero elements in the matrix.
"""))
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_c',
"""
Permutation Pc represented as an array of indices.
See the `SuperLU` docstring for details.
"""))
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_r',
"""
Permutation Pr represented as an array of indices.
See the `SuperLU` docstring for details.
"""))

View file

@@ -0,0 +1,882 @@
from warnings import warn, catch_warnings, simplefilter
import numpy as np
from numpy import asarray
from scipy.sparse import (issparse, SparseEfficiencyWarning,
csr_array, csc_array, eye_array, diags_array)
from scipy.sparse._sputils import (is_pydata_spmatrix, convert_pydata_sparse_to_scipy,
get_index_dtype, safely_cast_index_arrays)
from scipy.linalg import LinAlgError
import copy
import threading
from . import _superlu
noScikit = False
try:
import scikits.umfpack as umfpack
except ImportError:
noScikit = True
useUmfpack = threading.local()
__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
'MatrixRankWarning', 'spsolve_triangular', 'is_sptriangular', 'spbandwidth']
class MatrixRankWarning(UserWarning):
"""Warning for exactly singular matrices."""
pass
def use_solver(**kwargs):
"""
Select default sparse direct solver to be used.
Parameters
----------
useUmfpack : bool, optional
Use UMFPACK [1]_, [2]_, [3]_, [4]_ over SuperLU. Has an effect only
if ``scikits.umfpack`` is installed. Default: True
assumeSortedIndices : bool, optional
Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix.
Has an effect only if ``useUmfpack`` is True and ``scikits.umfpack`` is
installed. Default: False
Notes
-----
The default sparse solver is UMFPACK when available
(``scikits.umfpack`` is installed). This can be changed by passing
``useUmfpack=False``, which then causes the always-available SuperLU-based
solver to be used.
UMFPACK requires a CSR/CSC matrix to have sorted column/row indices. If you
are sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
to gain some speed.
References
----------
.. [1] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
multifrontal method with a column pre-ordering strategy, ACM
Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
https://dl.acm.org/doi/abs/10.1145/992200.992206
.. [2] T. A. Davis, A column pre-ordering strategy for the
unsymmetric-pattern multifrontal method, ACM Trans.
on Mathematical Software, 30(2), 2004, pp. 165--195.
https://dl.acm.org/doi/abs/10.1145/992200.992205
.. [3] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
method for unsymmetric sparse matrices, ACM Trans. on
Mathematical Software, 25(1), 1999, pp. 1--19.
https://doi.org/10.1145/305658.287640
.. [4] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
method for sparse LU factorization, SIAM J. Matrix Analysis and
Computations, 18(1), 1997, pp. 140--158.
https://doi.org/10.1137/S0895479894246905
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import use_solver, spsolve
>>> from scipy.sparse import csc_array
>>> R = np.random.randn(5, 5)
>>> A = csc_array(R)
>>> b = np.random.randn(5)
>>> use_solver(useUmfpack=False) # enforce superLU over UMFPACK
>>> x = spsolve(A, b)
>>> np.allclose(A.dot(x), b)
True
>>> use_solver(useUmfpack=True) # reset umfPack usage to default
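If the matrix is known to have sorted indices, the index-sorting step can be
skipped as well (a sketch; it only has an effect when ``scikits.umfpack`` is
installed)::
    use_solver(useUmfpack=True, assumeSortedIndices=True)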
"""
global useUmfpack
if 'useUmfpack' in kwargs:
useUmfpack.u = kwargs['useUmfpack']
if useUmfpack.u and 'assumeSortedIndices' in kwargs:
umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices'])
def _get_umf_family(A):
"""Get umfpack family string given the sparse matrix dtype."""
_families = {
(np.float64, np.int32): 'di',
(np.complex128, np.int32): 'zi',
(np.float64, np.int64): 'dl',
(np.complex128, np.int64): 'zl'
}
# A.dtype.name can only be "float64" or
# "complex128" in control flow
f_type = getattr(np, A.dtype.name)
# control flow may allow for more index
# types to get through here
i_type = getattr(np, A.indices.dtype.name)
try:
family = _families[(f_type, i_type)]
except KeyError as e:
msg = ('only float64 or complex128 matrices with int32 or int64 '
f'indices are supported! (got: matrix: {f_type}, indices: {i_type})')
raise ValueError(msg) from e
# See gh-8278. Considered converting only if
# A.shape[0]*A.shape[1] > np.iinfo(np.int32).max,
# but that didn't always fix the issue.
family = family[0] + "l"
A_new = copy.copy(A)
A_new.indptr = np.asarray(A.indptr, dtype=np.int64)
A_new.indices = np.asarray(A.indices, dtype=np.int64)
return family, A_new
def spsolve(A, b, permc_spec=None, use_umfpack=True):
"""Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
Parameters
----------
A : ndarray or sparse array or matrix
The square matrix A will be converted into CSC or CSR form
b : ndarray or sparse array or matrix
The matrix or vector representing the right hand side of the equation.
If a vector, b.shape must be (n,) or (n, 1).
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering [1]_, [2]_.
use_umfpack : bool, optional
if True (default) then use UMFPACK for the solution [3]_, [4]_, [5]_,
[6]_ . This is only referenced if b is a vector and
``scikits.umfpack`` is installed.
Returns
-------
x : ndarray or sparse array or matrix
the solution of the sparse linear equation.
If b is a vector, then x is a vector of size A.shape[1]
If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])
Notes
-----
For solving the matrix expression AX = B, this solver assumes the resulting
matrix X is sparse, as is often the case for very sparse inputs. If the
resulting X is dense, the construction of this sparse result will be
relatively expensive. In that case, consider converting A to a dense
matrix and using scipy.linalg.solve or its variants.
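If a dense result is expected, the dense fallback mentioned above is a
one-liner (a sketch, with ``A`` and ``B`` as in the Examples below)::
    from scipy.linalg import solve
    X = solve(A.toarray(), B.toarray())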
References
----------
.. [1] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836:
COLAMD, an approximate column minimum degree ordering algorithm,
ACM Trans. on Mathematical Software, 30(3), 2004, pp. 377--380.
:doi:`10.1145/1024074.1024080`
.. [2] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, A column approximate
minimum degree ordering algorithm, ACM Trans. on Mathematical
Software, 30(3), 2004, pp. 353--376. :doi:`10.1145/1024074.1024079`
.. [3] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
multifrontal method with a column pre-ordering strategy, ACM
Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
https://dl.acm.org/doi/abs/10.1145/992200.992206
.. [4] T. A. Davis, A column pre-ordering strategy for the
unsymmetric-pattern multifrontal method, ACM Trans.
on Mathematical Software, 30(2), 2004, pp. 165--195.
https://dl.acm.org/doi/abs/10.1145/992200.992205
.. [5] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
method for unsymmetric sparse matrices, ACM Trans. on
Mathematical Software, 25(1), 1999, pp. 1--19.
https://doi.org/10.1145/305658.287640
.. [6] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
method for sparse LU factorization, SIAM J. Matrix Analysis and
Computations, 18(1), 1997, pp. 140--158.
https://doi.org/10.1137/S0895479894246905
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import spsolve
>>> A = csc_array([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> B = csc_array([[2, 0], [-1, 0], [2, 0]], dtype=float)
>>> x = spsolve(A, B)
>>> np.allclose(A.dot(x).toarray(), B.toarray())
True
"""
is_pydata_sparse = is_pydata_spmatrix(b)
pydata_sparse_cls = b.__class__ if is_pydata_sparse else None
A = convert_pydata_sparse_to_scipy(A)
b = convert_pydata_sparse_to_scipy(b)
if not (issparse(A) and A.format in ("csc", "csr")):
A = csc_array(A)
warn('spsolve requires A to be in CSC or CSR matrix format',
SparseEfficiencyWarning, stacklevel=2)
# b is a vector only if b has shape (n,) or (n, 1)
b_is_sparse = issparse(b)
if not b_is_sparse:
b = asarray(b)
b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))
# sum duplicates for non-canonical format
A.sum_duplicates()
A = A._asfptype() # upcast to a floating point format
result_dtype = np.promote_types(A.dtype, b.dtype)
if A.dtype != result_dtype:
A = A.astype(result_dtype)
if b.dtype != result_dtype:
b = b.astype(result_dtype)
# validate input shapes
M, N = A.shape
if (M != N):
raise ValueError(f"matrix must be square (has shape {(M, N)})")
if M != b.shape[0]:
raise ValueError(f"matrix - rhs dimension mismatch ({A.shape} - {b.shape[0]})")
if not hasattr(useUmfpack, 'u'):
useUmfpack.u = not noScikit
use_umfpack = use_umfpack and useUmfpack.u
if b_is_vector and use_umfpack:
if b_is_sparse:
b_vec = b.toarray()
else:
b_vec = b
b_vec = asarray(b_vec, dtype=A.dtype).ravel()
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if A.dtype.char not in 'dD':
raise ValueError("please convert matrix data to double using"
" .astype(), or set linsolve.useUmfpack.u = False")
umf_family, A = _get_umf_family(A)
umf = umfpack.UmfpackContext(umf_family)
x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
autoTranspose=True)
else:
if b_is_vector and b_is_sparse:
b = b.toarray()
b_is_sparse = False
if not b_is_sparse:
if A.format == "csc":
flag = 1 # CSC format
else:
flag = 0 # CSR format
indices = A.indices.astype(np.intc, copy=False)
indptr = A.indptr.astype(np.intc, copy=False)
options = dict(ColPerm=permc_spec)
x, info = _superlu.gssv(N, A.nnz, A.data, indices, indptr,
b, flag, options=options)
if info != 0:
warn("Matrix is exactly singular", MatrixRankWarning, stacklevel=2)
x.fill(np.nan)
if b_is_vector:
x = x.ravel()
else:
# b is sparse
Afactsolve = factorized(A)
if not (b.format == "csc" or is_pydata_spmatrix(b)):
warn('spsolve is more efficient when sparse b '
'is in the CSC matrix format',
SparseEfficiencyWarning, stacklevel=2)
b = csc_array(b)
# Create a sparse output matrix by repeatedly applying
# the sparse factorization to solve columns of b.
data_segs = []
row_segs = []
col_segs = []
for j in range(b.shape[1]):
bj = b[:, j].toarray().ravel()
xj = Afactsolve(bj)
w = np.flatnonzero(xj)
segment_length = w.shape[0]
row_segs.append(w)
col_segs.append(np.full(segment_length, j, dtype=int))
data_segs.append(np.asarray(xj[w], dtype=A.dtype))
sparse_data = np.concatenate(data_segs)
idx_dtype = get_index_dtype(maxval=max(b.shape))
sparse_row = np.concatenate(row_segs, dtype=idx_dtype)
sparse_col = np.concatenate(col_segs, dtype=idx_dtype)
x = A.__class__((sparse_data, (sparse_row, sparse_col)),
shape=b.shape, dtype=A.dtype)
if is_pydata_sparse:
x = pydata_sparse_cls.from_scipy_sparse(x)
return x
def splu(A, permc_spec=None, diag_pivot_thresh=None,
relax=None, panel_size=None, options=None):
"""
Compute the LU decomposition of a sparse, square matrix.
Parameters
----------
A : sparse array or matrix
Sparse array to factorize. Most efficient when provided in CSC
format. Other formats will be converted to CSC before factorization.
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering
diag_pivot_thresh : float, optional
Threshold used for a diagonal entry to be an acceptable pivot.
See SuperLU user's guide for details [1]_
relax : int, optional
Expert option for customizing the degree of relaxing supernodes.
See SuperLU user's guide for details [1]_
panel_size : int, optional
Expert option for customizing the panel size.
See SuperLU user's guide for details [1]_
options : dict, optional
Dictionary containing additional expert options to SuperLU.
See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
for more details. For example, you can specify
``options=dict(Equil=False, IterRefine='SINGLE'))``
to turn equilibration off and perform a single iterative refinement.
Returns
-------
invA : scipy.sparse.linalg.SuperLU
Object with a ``solve`` method.
See also
--------
spilu : incomplete LU decomposition
Notes
-----
When a real array is factorized and the returned SuperLU object's ``solve()``
method is used with complex arguments, an error is generated. Instead, cast
the initial array to complex and then factorize.
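A sketch of that workaround, assuming a real sparse ``A`` and a complex
right-hand side ``b``::
    lu = splu(A.astype(complex))
    x = lu.solve(b)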
This function uses the SuperLU library.
References
----------
.. [1] SuperLU https://portal.nersc.gov/project/sparse/superlu/
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import splu
>>> A = csc_array([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
>>> B = splu(A)
>>> x = np.array([1., 2., 3.], dtype=float)
>>> B.solve(x)
array([ 1. , -3. , -1.5])
>>> A.dot(B.solve(x))
array([ 1., 2., 3.])
>>> B.solve(A.dot(x))
array([ 1., 2., 3.])
"""
if is_pydata_spmatrix(A):
A_cls = type(A)
def csc_construct_func(*a, cls=A_cls):
return cls.from_scipy_sparse(csc_array(*a))
A = A.to_scipy_sparse().tocsc()
else:
csc_construct_func = csc_array
if not (issparse(A) and A.format == "csc"):
A = csc_array(A)
warn('splu converted its input to CSC format',
SparseEfficiencyWarning, stacklevel=2)
# sum duplicates for non-canonical format
A.sum_duplicates()
A = A._asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") # is this true?
indices, indptr = safely_cast_index_arrays(A, np.intc, "SuperLU")
_options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
# Ensure that no column permutations are applied
if (_options["ColPerm"] == "NATURAL"):
_options["SymmetricMode"] = True
return _superlu.gstrf(N, A.nnz, A.data, indices, indptr,
csc_construct_func=csc_construct_func,
ilu=False, options=_options)
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
"""
Compute an incomplete LU decomposition for a sparse, square matrix.
The resulting object is an approximation to the inverse of `A`.
Parameters
----------
A : (N, N) array_like
Sparse array to factorize. Most efficient when provided in CSC format.
Other formats will be converted to CSC before factorization.
drop_tol : float, optional
Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
(default: 1e-4)
fill_factor : float, optional
Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
drop_rule : str, optional
Comma-separated string of drop rules to use.
Available rules: ``basic``, ``prows``, ``column``, ``area``,
``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
See SuperLU documentation for details.
Remaining options
Same as for `splu`
Returns
-------
invA_approx : scipy.sparse.linalg.SuperLU
Object with a ``solve`` method.
See also
--------
splu : complete LU decomposition
Notes
-----
When a real array is factorized and the returned SuperLU object's ``solve()``
method is used with complex arguments, an error is generated. Instead, cast
the initial array to complex and then factorize.
To improve the approximation to the inverse, you may need to
increase `fill_factor` AND decrease `drop_tol`.
This function uses the SuperLU library.
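A common use of the result is as a preconditioner for an iterative solver
(a sketch, assuming a square sparse ``A`` and a right-hand side ``b``)::
    from scipy.sparse.linalg import LinearOperator, gmres
    ilu = spilu(A)
    M = LinearOperator(A.shape, ilu.solve)
    x, info = gmres(A, b, M=M)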
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import spilu
>>> A = csc_array([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
>>> B = spilu(A)
>>> x = np.array([1., 2., 3.], dtype=float)
>>> B.solve(x)
array([ 1. , -3. , -1.5])
>>> A.dot(B.solve(x))
array([ 1., 2., 3.])
>>> B.solve(A.dot(x))
array([ 1., 2., 3.])
"""
if is_pydata_spmatrix(A):
A_cls = type(A)
def csc_construct_func(*a, cls=A_cls):
return cls.from_scipy_sparse(csc_array(*a))
A = A.to_scipy_sparse().tocsc()
else:
csc_construct_func = csc_array
if not (issparse(A) and A.format == "csc"):
A = csc_array(A)
warn('spilu converted its input to CSC format',
SparseEfficiencyWarning, stacklevel=2)
# sum duplicates for non-canonical format
A.sum_duplicates()
A = A._asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") # is this true?
indices, indptr = safely_cast_index_arrays(A, np.intc, "SuperLU")
_options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
ILU_FillFactor=fill_factor,
DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
# Ensure that no column permutations are applied
if (_options["ColPerm"] == "NATURAL"):
_options["SymmetricMode"] = True
return _superlu.gstrf(N, A.nnz, A.data, indices, indptr,
csc_construct_func=csc_construct_func,
ilu=True, options=_options)
def factorized(A):
"""
Return a function for solving a sparse linear system, with A pre-factorized.
Parameters
----------
A : (N, N) array_like
Input. A in CSC format is most efficient. A CSR format matrix will
be converted to CSC before factorization.
Returns
-------
solve : callable
To solve the linear system of equations given in `A`, the `solve`
callable should be passed an ndarray of shape (N,).
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import factorized
>>> from scipy.sparse import csc_array
>>> A = np.array([[ 3. , 2. , -1. ],
... [ 2. , -2. , 4. ],
... [-1. , 0.5, -1. ]])
>>> solve = factorized(csc_array(A)) # Makes LU decomposition.
>>> rhs1 = np.array([1, -2, 0])
>>> solve(rhs1) # Uses the LU factors.
array([ 1., -2., -2.])
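The returned callable can be applied to further right-hand sides without
re-factorizing:
>>> rhs2 = np.array([1, 0, 0])
>>> np.allclose(solve(rhs2), [0, 2/3, 1/3])  # reuses the stored LU factors
True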
"""
if is_pydata_spmatrix(A):
A = A.to_scipy_sparse().tocsc()
if not hasattr(useUmfpack, 'u'):
useUmfpack.u = not noScikit
if useUmfpack.u:
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if not (issparse(A) and A.format == "csc"):
A = csc_array(A)
warn('splu converted its input to CSC format',
SparseEfficiencyWarning, stacklevel=2)
A = A._asfptype() # upcast to a floating point format
if A.dtype.char not in 'dD':
raise ValueError("please convert matrix data to double using"
" .astype(), or set linsolve.useUmfpack.u = False")
umf_family, A = _get_umf_family(A)
umf = umfpack.UmfpackContext(umf_family)
# Make LU decomposition.
umf.numeric(A)
def solve(b):
with np.errstate(divide="ignore", invalid="ignore"):
# Ignoring warnings with numpy >= 1.23.0, see gh-16523
result = umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)
return result
return solve
else:
return splu(A).solve
def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False,
unit_diagonal=False):
"""
Solve the equation ``A x = b`` for `x`, assuming A is a triangular matrix.
Parameters
----------
A : (M, M) sparse array or matrix
A sparse square triangular matrix. Should be in CSR or CSC format.
b : (M,) or (M, N) array_like
Right-hand side matrix in ``A x = b``
lower : bool, optional
Whether `A` is a lower or upper triangular matrix.
Default is lower triangular matrix.
overwrite_A : bool, optional
Allow changing `A`.
Enabling gives a performance gain. Default is False.
overwrite_b : bool, optional
Allow overwriting data in `b`.
Enabling gives a performance gain. Default is False.
If `overwrite_b` is True, it should be ensured that
`b` has an appropriate dtype to be able to store the result.
unit_diagonal : bool, optional
If True, diagonal elements of `A` are assumed to be 1.
.. versionadded:: 1.4.0
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system ``A x = b``. Shape of return matches shape
of `b`.
Raises
------
LinAlgError
If `A` is singular or not triangular.
ValueError
If shape of `A` or shape of `b` do not match the requirements.
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import spsolve_triangular
>>> A = csc_array([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
>>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float)
>>> x = spsolve_triangular(A, B)
>>> np.allclose(A.dot(x), B)
True
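With ``unit_diagonal=True`` the stored diagonal entries are ignored and taken
to be one (a sketch reusing ``A`` and ``B`` from above):
>>> x = spsolve_triangular(A, B, unit_diagonal=True)
>>> np.allclose(x[0], B[0])
True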
"""
if is_pydata_spmatrix(A):
A = A.to_scipy_sparse().tocsc()
trans = "N"
if issparse(A) and A.format == "csr":
A = A.T
trans = "T"
lower = not lower
if not (issparse(A) and A.format == "csc"):
warn('CSC or CSR matrix format is required. Converting to CSC matrix.',
SparseEfficiencyWarning, stacklevel=2)
A = csc_array(A)
elif not overwrite_A:
A = A.copy()
M, N = A.shape
if M != N:
raise ValueError(
f'A must be a square matrix but its shape is {A.shape}.')
if unit_diagonal:
with catch_warnings():
simplefilter('ignore', SparseEfficiencyWarning)
A.setdiag(1)
else:
diag = A.diagonal()
if np.any(diag == 0):
raise LinAlgError(
'A is singular: zero entry on diagonal.')
invdiag = 1/diag
if trans == "N":
A = A @ diags_array(invdiag)
else:
A = (A.T @ diags_array(invdiag)).T
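# Scaling by 1/diag makes the diagonal exactly one, so the substitution can
# assume a unit diagonal; the scaling is undone on x after the solve below.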
# sum duplicates for non-canonical format
A.sum_duplicates()
b = np.asanyarray(b)
if b.ndim not in [1, 2]:
raise ValueError(
f'b must have 1 or 2 dims but its shape is {b.shape}.')
if M != b.shape[0]:
raise ValueError(
'The size of the dimensions of A must be equal to '
'the size of the first dimension of b but the shape of A is '
f'{A.shape} and the shape of b is {b.shape}.'
)
result_dtype = np.promote_types(np.promote_types(A.dtype, np.float32), b.dtype)
if A.dtype != result_dtype:
A = A.astype(result_dtype)
if b.dtype != result_dtype:
b = b.astype(result_dtype)
elif not overwrite_b:
b = b.copy()
if lower:
L = A
U = csc_array((N, N), dtype=result_dtype)
else:
L = eye_array(N, dtype=result_dtype, format='csc')
U = A
U.setdiag(0)
x, info = _superlu.gstrs(trans,
N, L.nnz, L.data, L.indices, L.indptr,
N, U.nnz, U.data, U.indices, U.indptr,
b)
if info:
raise LinAlgError('A is singular.')
if not unit_diagonal:
invdiag = invdiag.reshape(-1, *([1] * (len(x.shape) - 1)))
x = x * invdiag
return x
def is_sptriangular(A):
"""Returns 2-tuple indicating lower/upper triangular structure for sparse ``A``
Checks for triangular structure in ``A``. The result is summarized in
two boolean values ``lower`` and ``upper`` to designate whether ``A`` is
lower triangular or upper triangular respectively. Diagonal ``A`` will
result in both being True. Non-triangular structure results in False for both.
Only the sparse structure is used here. Values are not checked for zeros.
This function will convert a copy of ``A`` to CSR format if it is not already
in CSR or CSC format. So it may be more efficient to convert it yourself if you
have other uses for the CSR/CSC version.
If ``A`` is not square, the portions outside the upper left square of the
matrix do not affect its triangular structure. You probably want to work
with the square portion of the matrix, though it is not required here.
Parameters
----------
A : SciPy sparse array or matrix
A sparse matrix, preferably in CSR or CSC format.
Returns
-------
lower, upper : 2-tuple of bool
.. versionadded:: 1.15.0
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array, eye_array
>>> from scipy.sparse.linalg import is_sptriangular
>>> A = csc_array([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
>>> is_sptriangular(A)
(True, False)
>>> D = eye_array(3, format='csr')
>>> is_sptriangular(D)
(True, True)
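A matrix with stored entries on both sides of the diagonal is neither:
>>> B = csc_array([[0., 1.], [1., 0.]])
>>> is_sptriangular(B)
(False, False)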
"""
if not (issparse(A) and A.format in ("csc", "csr", "coo", "dia", "dok", "lil")):
warn('is_sptriangular needs sparse and not BSR format. Converting to CSR.',
SparseEfficiencyWarning, stacklevel=2)
A = csr_array(A)
# bsr is better off converting to csr
if A.format == "dia":
return A.offsets.max() <= 0, A.offsets.min() >= 0
elif A.format == "coo":
rows, cols = A.coords
return (cols <= rows).all(), (cols >= rows).all()
elif A.format == "dok":
return all(c <= r for r, c in A.keys()), all(c >= r for r, c in A.keys())
elif A.format == "lil":
lower = all(col <= row for row, cols in enumerate(A.rows) for col in cols)
upper = all(col >= row for row, cols in enumerate(A.rows) for col in cols)
return lower, upper
# format in ("csc", "csr")
indptr, indices = A.indptr, A.indices
N = len(indptr) - 1
lower, upper = True, True
# check middle, 1st, last col (treat as CSC and switch at end if CSR)
for col in [N // 2, 0, -1]:
rows = indices[indptr[col]:indptr[col + 1]]
upper = upper and (col >= rows).all()
lower = lower and (col <= rows).all()
if not upper and not lower:
return False, False
# check all cols
cols = np.repeat(np.arange(N), np.diff(indptr))
rows = indices
upper = upper and (cols >= rows).all()
lower = lower and (cols <= rows).all()
if A.format == 'csr':
return upper, lower
return lower, upper
def spbandwidth(A):
"""Return the lower and upper bandwidth of a 2D numeric array.
Computes the lower and upper limits on the bandwidth of the
sparse 2D array ``A``. The result is summarized as a 2-tuple
of positive integers ``(lo, hi)``. A zero denotes no sub/super
diagonal entries on that side (tringular). The maximum value
for ``lo``(``hi``) is one less than the number of rows(cols).
Only the sparse structure is used here. Values are not checked for zeros.
Parameters
----------
A : SciPy sparse array or matrix
A sparse matrix, preferably in CSR or CSC format.
Returns
-------
below, above : 2-tuple of int
The distance to the farthest non-zero diagonal below/above the
main diagonal.
.. versionadded:: 1.15.0
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import spbandwidth
>>> from scipy.sparse import csc_array, eye_array
>>> A = csc_array([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
>>> spbandwidth(A)
(2, 0)
>>> D = eye_array(3, format='csr')
>>> spbandwidth(D)
(0, 0)
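An upper-triangular example:
>>> U = csc_array([[1., 0., 4.], [0., 1., 0.], [0., 0., 1.]])
>>> spbandwidth(U)
(0, 2)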
"""
if not (issparse(A) and A.format in ("csc", "csr", "coo", "dia", "dok")):
warn('spbandwidth needs sparse format not LIL and BSR. Converting to CSR.',
SparseEfficiencyWarning, stacklevel=2)
A = csr_array(A)
# bsr and lil are better off converting to csr
if A.format == "dia":
return max(0, -A.offsets.min().item()), max(0, A.offsets.max().item())
if A.format in ("csc", "csr"):
indptr, indices = A.indptr, A.indices
N = len(indptr) - 1
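# For CSC, each stored entry contributes col - row to ``gap`` (columns are
# recovered by repeating each column index once per stored entry); positive
# values lie above the diagonal. CSR swaps the roles, hence the sign flip.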
gap = np.repeat(np.arange(N), np.diff(indptr)) - indices
if A.format == 'csr':
gap = -gap
elif A.format == "coo":
gap = A.coords[1] - A.coords[0]
elif A.format == "dok":
    gap = [(c - r) for r, c in A.keys()] + [0]
    return -min(gap), max(gap)
return max(-np.min(gap).item(), 0), max(np.max(gap).item(), 0)

View file

@@ -0,0 +1,928 @@
import sys
import threading
import numpy as np
from numpy import array, finfo, arange, eye, all, unique, ones, dot
from numpy.exceptions import ComplexWarning
from numpy.testing import (
assert_array_almost_equal, assert_almost_equal,
assert_equal, assert_array_equal, assert_, assert_allclose,
assert_warns, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import scipy.linalg
from scipy.linalg import norm, inv
from scipy.sparse import (dia_array, SparseEfficiencyWarning, csc_array,
csr_array, eye_array, issparse, dok_array, lil_array, bsr_array, kron)
from scipy.sparse.linalg import SuperLU
from scipy.sparse.linalg._dsolve import (spsolve, use_solver, splu, spilu,
MatrixRankWarning, _superlu, spsolve_triangular, factorized,
is_sptriangular, spbandwidth)
import scipy.sparse
from scipy._lib._testutils import check_free_memory
sup_sparse_efficiency = suppress_warnings()
sup_sparse_efficiency.filter(SparseEfficiencyWarning)
# scikits.umfpack is not a SciPy dependency but it is optionally used in
# dsolve, so check whether it's available
try:
import scikits.umfpack as umfpack
has_umfpack = True
except ImportError:
has_umfpack = False
def toarray(a):
if issparse(a):
return a.toarray()
else:
return a
def setup_bug_8278():
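# Assemble the 3-D Laplacian on an (N-1)**3 grid from the 1-D second-difference
# operator via Kronecker sums; this reproduces the large system from gh-8278.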
N = 2 ** 6
h = 1/N
Ah1D = dia_array(([-1, 2, -1], [-1, 0, 1]), shape=(N-1, N-1))/(h**2)
eyeN = eye_array(N - 1)
A = (kron(eyeN, kron(eyeN, Ah1D))
+ kron(eyeN, kron(Ah1D, eyeN))
+ kron(Ah1D, kron(eyeN, eyeN)))
b = np.random.rand((N-1)**3)
return A, b
class TestFactorized:
def setup_method(self):
n = 5
d = arange(n) + 1
self.n = n
self.A = dia_array(((d, 2*d, d[::-1]), (-3, 0, 5)), shape=(n,n)).tocsc()
def _check_singular(self):
A = csc_array((5,5), dtype='d')
b = ones(5)
assert_array_almost_equal(0. * b, factorized(A)(b))
def _check_non_singular(self):
# Make a diagonal dominant, to make sure it is not singular
n = 5
rng = np.random.default_rng(14332)
a = csc_array(rng.random((n, n)))
b = ones(n)
expected = splu(a).solve(b)
assert_array_almost_equal(factorized(a)(b), expected)
def test_singular_without_umfpack(self):
use_solver(useUmfpack=False)
with assert_raises(RuntimeError, match="Factor is exactly singular"):
self._check_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_singular_with_umfpack(self):
use_solver(useUmfpack=True)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars")
assert_warns(umfpack.UmfpackWarning, self._check_singular)
def test_non_singular_without_umfpack(self):
use_solver(useUmfpack=False)
self._check_non_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_non_singular_with_umfpack(self):
use_solver(useUmfpack=True)
self._check_non_singular()
def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
msg = "can only factor square matrices"
with assert_raises(ValueError, match=msg):
factorized(self.A[:, :4])
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_factorizes_nonsquare_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
# does not raise
factorized(self.A[:,:4])
def test_call_with_incorrectly_sized_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
rng = np.random.default_rng(230498)
b = rng.random(4)
B = rng.random((4, 3))
BB = rng.random((self.n, 3, 9))
with assert_raises(ValueError, match="is of incompatible size"):
solve(b)
with assert_raises(ValueError, match="is of incompatible size"):
solve(B)
with assert_raises(ValueError,
match="object too deep for desired array"):
solve(BB)
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_incorrectly_sized_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
rng = np.random.default_rng(643095823)
b = rng.random(4)
B = rng.random((4, 3))
BB = rng.random((self.n, 3, 9))
# does not raise
solve(b)
msg = "object too deep for desired array"
with assert_raises(ValueError, match=msg):
solve(B)
with assert_raises(ValueError, match=msg):
solve(BB)
def test_call_with_cast_to_complex_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
rng = np.random.default_rng(23454)
b = rng.random(4)
for t in [np.complex64, np.complex128]:
with assert_raises(TypeError, match="Cannot cast array data"):
solve(b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_cast_to_complex_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
rng = np.random.default_rng(23454)
b = rng.random(4)
for t in [np.complex64, np.complex128]:
assert_warns(ComplexWarning, solve, b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_assume_sorted_indices_flag(self):
# a sparse matrix with unsorted indices
unsorted_inds = np.array([2, 0, 1, 0])
data = np.array([10, 16, 5, 0.4])
indptr = np.array([0, 1, 2, 4])
A = csc_array((data, unsorted_inds, indptr), (3, 3))
b = ones(3)
# should raise when incorrectly assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=True)
with assert_raises(RuntimeError,
match="UMFPACK_ERROR_invalid_matrix"):
factorized(A)
# should sort indices and succeed when not assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=False)
expected = splu(A.copy()).solve(b)
assert_equal(A.has_sorted_indices, 0)
assert_array_almost_equal(factorized(A)(b), expected)
@pytest.mark.slow
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_bug_8278(self):
check_free_memory(8000)
use_solver(useUmfpack=True)
A, b = setup_bug_8278()
A = A.tocsc()
f = factorized(A)
x = f(b)
assert_array_almost_equal(A @ x, b)
class TestLinsolve:
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
A = csc_array((5,5), dtype='d')
b = array([1, 2, 3, 4, 5],dtype='d')
with suppress_warnings() as sup:
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
def test_singular_gh_3312(self):
# "Bad" test case that leads SuperLU to call LAPACK with invalid
# arguments. Check that it fails moderately gracefully.
ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
A = csc_array((v, ij.T), shape=(20, 20))
b = np.arange(20)
try:
# should either raise a runtime error or return value
# appropriate for singular input (which yields the warning)
with suppress_warnings() as sup:
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
x = spsolve(A, b)
assert not np.isfinite(x).any()
except RuntimeError:
pass
@pytest.mark.parametrize('format', ['csc', 'csr'])
@pytest.mark.parametrize('idx_dtype', [np.int32, np.int64])
def test_twodiags(self, format: str, idx_dtype: np.dtype):
A = dia_array(([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1]),
shape=(5, 5)).asformat(format)
b = array([1, 2, 3, 4, 5])
# condition number of A
cond_A = norm(A.toarray(), 2) * norm(inv(A.toarray()), 2)
for t in ['f','d','F','D']:
eps = finfo(t).eps # floating point epsilon
b = b.astype(t)
Asp = A.astype(t)
Asp.indices = Asp.indices.astype(idx_dtype, copy=False)
Asp.indptr = Asp.indptr.astype(idx_dtype, copy=False)
x = spsolve(Asp, b)
assert_(norm(b - Asp@x) < 10 * cond_A * eps)
def test_bvector_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_array(Adense)
rng = np.random.default_rng(1234)
x = rng.standard_normal(3)
b = As@x
x2 = spsolve(As, b)
assert_array_almost_equal(x, x2)
def test_bmatrix_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_array(Adense)
rng = np.random.default_rng(1234)
x = rng.standard_normal((3, 4))
Bdense = As.dot(x)
Bs = csc_array(Bdense)
x2 = spsolve(As, Bs)
assert_array_almost_equal(x, x2.toarray())
@pytest.mark.thread_unsafe
@sup_sparse_efficiency
def test_non_square(self):
# A is not square.
A = ones((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve, A, b)
# A2 and b2 have incompatible shapes.
A2 = csc_array(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve, A2, b2)
@pytest.mark.thread_unsafe
@sup_sparse_efficiency
def test_example_comparison(self):
row = array([0,0,1,2,2,2])
col = array([0,2,2,0,1,2])
data = array([1,2,3,-4,5,6])
sM = csr_array((data,(row,col)), shape=(3,3), dtype=float)
M = sM.toarray()
row = array([0,0,1,1,0,0])
col = array([0,2,1,1,0,0])
data = array([1,1,1,1,1,1])
sN = csr_array((data, (row,col)), shape=(3,3), dtype=float)
N = sN.toarray()
sX = spsolve(sM, sN)
X = scipy.linalg.solve(M, N)
assert_array_almost_equal(X, sX.toarray())
@pytest.mark.thread_unsafe
@sup_sparse_efficiency
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_shape_compatibility(self):
use_solver(useUmfpack=True)
A = csc_array([[1., 0], [0, 2]])
bs = [
[1, 6],
array([1, 6]),
[[1], [6]],
array([[1], [6]]),
csc_array([[1], [6]]),
csr_array([[1], [6]]),
dok_array([[1], [6]]),
bsr_array([[1], [6]]),
array([[1., 2., 3.], [6., 8., 10.]]),
csc_array([[1., 2., 3.], [6., 8., 10.]]),
csr_array([[1., 2., 3.], [6., 8., 10.]]),
dok_array([[1., 2., 3.], [6., 8., 10.]]),
bsr_array([[1., 2., 3.], [6., 8., 10.]]),
]
for b in bs:
x = np.linalg.solve(A.toarray(), toarray(b))
for spmattype in [csc_array, csr_array, dok_array, lil_array]:
x1 = spsolve(spmattype(A), b, use_umfpack=True)
x2 = spsolve(spmattype(A), b, use_umfpack=False)
# check solution
if x.ndim == 2 and x.shape[1] == 1:
# these are also interpreted as "vectors"
x = x.ravel()
assert_array_almost_equal(toarray(x1), x,
err_msg=repr((b, spmattype, 1)))
assert_array_almost_equal(toarray(x2), x,
err_msg=repr((b, spmattype, 2)))
# dense vs. sparse output ("vectors" are always dense)
if issparse(b) and x.ndim > 1:
assert_(issparse(x1), repr((b, spmattype, 1)))
assert_(issparse(x2), repr((b, spmattype, 2)))
else:
assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))
# check output shape
if x.ndim == 1:
# "vector"
assert_equal(x1.shape, (A.shape[1],))
assert_equal(x2.shape, (A.shape[1],))
else:
# "matrix"
assert_equal(x1.shape, x.shape)
assert_equal(x2.shape, x.shape)
A = csc_array((3, 3))
b = csc_array((1, 3))
assert_raises(ValueError, spsolve, A, b)
@pytest.mark.thread_unsafe
@sup_sparse_efficiency
def test_ndarray_support(self):
A = array([[1., 2.], [2., 0.]])
x = array([[1., 1.], [0.5, -0.5]])
b = array([[2., 0.], [2., 2.]])
assert_array_almost_equal(x, spsolve(A, b))
def test_gssv_badinput(self):
N = 10
d = arange(N) + 1.0
A = dia_array(((d, 2*d, d[::-1]), (-3, 0, 5)), shape=(N, N))
for container in (csc_array, csr_array):
A = container(A)
b = np.arange(N)
def not_c_contig(x):
return x.repeat(2)[::2]
def not_1dim(x):
return x[:,None]
def bad_type(x):
return x.astype(bool)
def too_short(x):
return x[:-1]
badops = [not_c_contig, not_1dim, bad_type, too_short]
for badop in badops:
msg = f"{container!r} {badop!r}"
# Not C-contiguous
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, badop(A.data), A.indices, A.indptr,
b, int(A.format == 'csc'), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, badop(A.indices), A.indptr,
b, int(A.format == 'csc'), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, A.indices, badop(A.indptr),
b, int(A.format == 'csc'), err_msg=msg)
def test_sparsity_preservation(self):
ident = csc_array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
b = csc_array([
[0, 1],
[1, 0],
[0, 0]])
x = spsolve(ident, b)
assert_equal(ident.nnz, 3)
assert_equal(b.nnz, 2)
assert_equal(x.nnz, 2)
assert_allclose(x.toarray(), b.toarray(), atol=1e-12, rtol=1e-12)
def test_dtype_cast(self):
A_real = scipy.sparse.csr_array([[1, 2, 0],
[0, 0, 3],
[4, 0, 5]])
A_complex = scipy.sparse.csr_array([[1, 2, 0],
[0, 0, 3],
[4, 0, 5 + 1j]])
b_real = np.array([1,1,1])
b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])
x = spsolve(A_real, b_real)
assert_(np.issubdtype(x.dtype, np.floating))
x = spsolve(A_real, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_real)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
@pytest.mark.slow
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_bug_8278(self):
check_free_memory(8000)
use_solver(useUmfpack=True)
A, b = setup_bug_8278()
x = spsolve(A, b)
assert_array_almost_equal(A @ x, b)
class TestSplu:
def setup_method(self):
use_solver(useUmfpack=False)
n = 40
d = arange(n) + 1
self.n = n
self.A = dia_array(((d, 2*d, d[::-1]), (-3, 0, 5)), shape=(n, n)).tocsc()
def _smoketest(self, spxlu, check, dtype, idx_dtype):
if np.issubdtype(dtype, np.complexfloating):
A = self.A + 1j*self.A.T
else:
A = self.A
A = A.astype(dtype)
A.indices = A.indices.astype(idx_dtype, copy=False)
A.indptr = A.indptr.astype(idx_dtype, copy=False)
lu = spxlu(A)
rng = np.random.RandomState(1234)
# Input shapes
for k in [None, 1, 2, self.n, self.n+2]:
msg = f"k={k!r}"
if k is None:
b = rng.rand(self.n)
else:
b = rng.rand(self.n, k)
if np.issubdtype(dtype, np.complexfloating):
b = b + 1j*rng.rand(*b.shape)
b = b.astype(dtype)
x = lu.solve(b)
check(A, b, x, msg)
x = lu.solve(b, 'T')
check(A.T, b, x, msg)
x = lu.solve(b, 'H')
check(A.T.conj(), b, x, msg)
@pytest.mark.thread_unsafe
@sup_sparse_efficiency
def test_splu_smoketest(self):
self._internal_test_splu_smoketest()
def _internal_test_splu_smoketest(self):
# Check that splu works at all
def check(A, b, x, msg=""):
eps = np.finfo(A.dtype).eps
r = A @ x
assert_(abs(r - b).max() < 1e3*eps, msg)
for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
for idx_dtype in [np.int32, np.int64]:
self._smoketest(splu, check, dtype, idx_dtype)
@pytest.mark.thread_unsafe
@sup_sparse_efficiency
def test_spilu_smoketest(self):
self._internal_test_spilu_smoketest()
def _internal_test_spilu_smoketest(self):
errors = []
def check(A, b, x, msg=""):
r = A @ x
err = abs(r - b).max()
assert_(err < 1e-2, msg)
if b.dtype in (np.float64, np.complex128):
errors.append(err)
for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
for idx_dtype in [np.int32, np.int64]:
self._smoketest(spilu, check, dtype, idx_dtype)
assert_(max(errors) > 1e-5)
@pytest.mark.thread_unsafe
@sup_sparse_efficiency
def test_spilu_drop_rule(self):
# Test passing in the drop_rule argument to spilu.
A = eye_array(2)
rules = [
b'basic,area'.decode('ascii'), # unicode
b'basic,area', # ascii
[b'basic', b'area'.decode('ascii')]
]
for rule in rules:
# Argument should be accepted
assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))
def test_splu_nnz0(self):
A = csc_array((5,5), dtype='d')
assert_raises(RuntimeError, splu, A)
def test_spilu_nnz0(self):
A = csc_array((5,5), dtype='d')
assert_raises(RuntimeError, spilu, A)
def test_splu_basic(self):
# Test basic splu functionality.
n = 30
rng = np.random.RandomState(12)
a = rng.rand(n, n)
a[a < 0.95] = 0
# First test with a singular matrix
a[:, 0] = 0
a_ = csc_array(a)
# Matrix is exactly singular
assert_raises(RuntimeError, splu, a_)
# Make a diagonal dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_array(a)
lu = splu(a_)
b = ones(n)
x = lu.solve(b)
assert_almost_equal(dot(a, x), b)
def test_splu_perm(self):
# Test the permutation vectors exposed by splu.
n = 30
rng = np.random.default_rng(1342354)
a = rng.random((n, n))
a[a < 0.95] = 0
# Make a diagonal dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_array(a)
lu = splu(a_)
# Check that the permutation indices do belong to [0, n-1].
for perm in (lu.perm_r, lu.perm_c):
assert_(all(perm > -1))
assert_(all(perm < n))
assert_equal(len(unique(perm)), len(perm))
# Now make a symmetric, and test that the two permutation vectors are
# the same
# Note: a += a.T relies on undefined behavior.
a = a + a.T
a_ = csc_array(a)
lu = splu(a_)
assert_array_equal(lu.perm_r, lu.perm_c)
@pytest.mark.parametrize("splu_fun, rtol", [(splu, 1e-7), (spilu, 1e-1)])
def test_natural_permc(self, splu_fun, rtol):
# Test that the "NATURAL" permc_spec does not permute the matrix
rng = np.random.RandomState(42)
n = 500
p = 0.01
A = scipy.sparse.random(n, n, p, random_state=rng)
x = rng.rand(n)
# Make A diagonal dominant to make sure it is not singular
A += (n+1)*scipy.sparse.eye_array(n)
A_ = csc_array(A)
b = A_ @ x
# without permc_spec, permutation is not identity
lu = splu_fun(A_)
assert_(np.any(lu.perm_c != np.arange(n)))
# with permc_spec="NATURAL", permutation is identity
lu = splu_fun(A_, permc_spec="NATURAL")
assert_array_equal(lu.perm_c, np.arange(n))
# Also, lu decomposition is valid
x2 = lu.solve(b)
assert_allclose(x, x2, rtol=rtol)
@pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount")
def test_lu_refcount(self):
# Test that we are keeping track of the reference count with splu.
n = 30
rng = np.random.default_rng(1342354)
a = rng.random((n, n))
a[a < 0.95] = 0
# Make a diagonal dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_array(a)
lu = splu(a_)
# And now test that we don't have a refcount bug
rc = sys.getrefcount(lu)
for attr in ('perm_r', 'perm_c'):
perm = getattr(lu, attr)
assert_equal(sys.getrefcount(lu), rc + 1)
del perm
assert_equal(sys.getrefcount(lu), rc)
def test_bad_inputs(self):
A = self.A.tocsc()
rng = np.random.default_rng(235634)
assert_raises(ValueError, splu, A[:,:4])
assert_raises(ValueError, spilu, A[:,:4])
for lu in [splu(A), spilu(A)]:
b = rng.random(42)
B = rng.random((42, 3))
BB = rng.random((self.n, 3, 9))
assert_raises(ValueError, lu.solve, b)
assert_raises(ValueError, lu.solve, B)
assert_raises(ValueError, lu.solve, BB)
assert_raises(TypeError, lu.solve,
b.astype(np.complex64))
assert_raises(TypeError, lu.solve,
b.astype(np.complex128))
@pytest.mark.thread_unsafe
@sup_sparse_efficiency
def test_superlu_dlamch_i386_nan(self):
# SuperLU 4.3 calls some functions returning floats without
# declaring them. On i386@linux call convention, this fails to
# clear floating point registers after call. As a result, NaN
# can appear in the next floating point operation made.
#
# Here's a test case that triggered the issue.
n = 8
d = np.arange(n) + 1
A = dia_array(((d, 2*d, d[::-1]), (-3, 0, 5)), shape=(n, n))
A = A.astype(np.float32)
spilu(A)
A = A + 1j*A
B = A.toarray()
assert_(not np.isnan(B).any())
@pytest.mark.thread_unsafe
@sup_sparse_efficiency
def test_lu_attr(self):
def check(dtype, complex_2=False):
A = self.A.astype(dtype)
if complex_2:
A = A + 1j*A.T
n = A.shape[0]
lu = splu(A)
# Check that the decomposition is as advertised
Pc = np.zeros((n, n))
Pc[np.arange(n), lu.perm_c] = 1
Pr = np.zeros((n, n))
Pr[lu.perm_r, np.arange(n)] = 1
Ad = A.toarray()
lhs = Pr.dot(Ad).dot(Pc)
rhs = (lu.L @ lu.U).toarray()
eps = np.finfo(dtype).eps
assert_allclose(lhs, rhs, atol=100*eps)
check(np.float32)
check(np.float64)
check(np.complex64)
check(np.complex128)
check(np.complex64, True)
check(np.complex128, True)
@pytest.mark.thread_unsafe
@pytest.mark.slow
@sup_sparse_efficiency
def test_threads_parallel(self):
oks = []
def worker():
try:
self.test_splu_basic()
self._internal_test_splu_smoketest()
self._internal_test_spilu_smoketest()
oks.append(True)
except Exception:
pass
threads = [threading.Thread(target=worker)
for k in range(20)]
for t in threads:
t.start()
for t in threads:
t.join()
assert_equal(len(oks), 20)
@pytest.mark.thread_unsafe
def test_singular_matrix(self):
# Test that SuperLU does not print to stdout when a singular matrix is
# passed. See gh-20993.
A = eye_array(10, format='csr')
A[-1, -1] = 0
b = np.zeros(10)
with pytest.warns(MatrixRankWarning):
res = spsolve(A, b)
assert np.isnan(res).all()
class TestGstrsErrors:
def setup_method(self):
self.A = array([[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]], dtype=np.float64)
self.b = np.array([[1.0],[2.0],[3.0]], dtype=np.float64)
def test_trans(self):
L = scipy.sparse.tril(self.A, format='csc')
U = scipy.sparse.triu(self.A, k=1, format='csc')
with assert_raises(ValueError, match="trans must be N, T, or H"):
_superlu.gstrs('X', L.shape[0], L.nnz, L.data, L.indices, L.indptr,
U.shape[0], U.nnz, U.data, U.indices, U.indptr, self.b)
def test_shape_LU(self):
L = scipy.sparse.tril(self.A[0:2,0:2], format='csc')
U = scipy.sparse.triu(self.A, k=1, format='csc')
with assert_raises(ValueError, match="L and U must have the same dimension"):
_superlu.gstrs('N', L.shape[0], L.nnz, L.data, L.indices, L.indptr,
U.shape[0], U.nnz, U.data, U.indices, U.indptr, self.b)
def test_shape_b(self):
L = scipy.sparse.tril(self.A, format='csc')
U = scipy.sparse.triu(self.A, k=1, format='csc')
with assert_raises(ValueError, match="right hand side array has invalid shape"):
_superlu.gstrs('N', L.shape[0], L.nnz, L.data, L.indices, L.indptr,
U.shape[0], U.nnz, U.data, U.indices, U.indptr,
self.b[0:2])
def test_types_differ(self):
L = scipy.sparse.tril(self.A.astype(np.float32), format='csc')
U = scipy.sparse.triu(self.A, k=1, format='csc')
with assert_raises(TypeError, match="nzvals types of L and U differ"):
_superlu.gstrs('N', L.shape[0], L.nnz, L.data, L.indices, L.indptr,
U.shape[0], U.nnz, U.data, U.indices, U.indptr, self.b)
def test_types_unsupported(self):
L = scipy.sparse.tril(self.A.astype(np.uint8), format='csc')
U = scipy.sparse.triu(self.A.astype(np.uint8), k=1, format='csc')
with assert_raises(TypeError, match="nzvals is not of a type supported"):
_superlu.gstrs('N', L.shape[0], L.nnz, L.data, L.indices, L.indptr,
U.shape[0], U.nnz, U.data, U.indices, U.indptr,
self.b.astype(np.uint8))
class TestSpsolveTriangular:
def setup_method(self):
use_solver(useUmfpack=False)
@pytest.mark.parametrize("fmt",["csr","csc"])
def test_zero_diagonal(self,fmt):
n = 5
rng = np.random.default_rng(43876432987)
A = rng.standard_normal((n, n))
b = np.arange(n)
A = scipy.sparse.tril(A, k=0, format=fmt)
x = spsolve_triangular(A, b, unit_diagonal=True, lower=True)
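        # `unit_diagonal=True` tells the solver to ignore the stored diagonal
        # and assume ones there, so make that assumption explicit before
        # verifying the residual.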
A.setdiag(1)
assert_allclose(A.dot(x), b)
# Regression test from gh-15199
A = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=np.float64)
b = np.array([1., 2., 3.])
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning, "CSC or CSR matrix format is")
spsolve_triangular(A, b, unit_diagonal=True)
@pytest.mark.parametrize("fmt",["csr","csc"])
def test_singular(self,fmt):
n = 5
if fmt == "csr":
A = csr_array((n, n))
else:
A = csc_array((n, n))
b = np.arange(n)
for lower in (True, False):
assert_raises(scipy.linalg.LinAlgError,
spsolve_triangular, A, b, lower=lower)
@pytest.mark.thread_unsafe
@sup_sparse_efficiency
def test_bad_shape(self):
# A is not square.
A = np.zeros((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve_triangular, A, b)
# A2 and b2 have incompatible shapes.
A2 = csr_array(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve_triangular, A2, b2)
@pytest.mark.thread_unsafe
@sup_sparse_efficiency
def test_input_types(self):
A = array([[1., 0.], [1., 2.]])
b = array([[2., 0.], [2., 2.]])
for matrix_type in (array, csc_array, csr_array):
x = spsolve_triangular(matrix_type(A), b, lower=True)
assert_array_almost_equal(A.dot(x), b)
@pytest.mark.thread_unsafe
@pytest.mark.slow
@sup_sparse_efficiency
@pytest.mark.parametrize("n", [10, 10**2, 10**3])
@pytest.mark.parametrize("m", [1, 10])
@pytest.mark.parametrize("lower", [True, False])
@pytest.mark.parametrize("format", ["csr", "csc"])
@pytest.mark.parametrize("unit_diagonal", [False, True])
@pytest.mark.parametrize("choice_of_A", ["real", "complex"])
@pytest.mark.parametrize("choice_of_b", ["floats", "ints", "complexints"])
def test_random(self, n, m, lower, format, unit_diagonal, choice_of_A, choice_of_b):
def random_triangle_matrix(n, lower=True, format="csr", choice_of_A="real"):
if choice_of_A == "real":
dtype = np.float64
elif choice_of_A == "complex":
dtype = np.complex128
else:
raise ValueError("choice_of_A must be 'real' or 'complex'.")
rng = np.random.default_rng(789002319)
rvs = rng.random
A = scipy.sparse.random(n, n, density=0.1, format='lil', dtype=dtype,
random_state=rng, data_rvs=rvs)
if lower:
A = scipy.sparse.tril(A, format="lil")
else:
A = scipy.sparse.triu(A, format="lil")
for i in range(n):
                A[i, i] = rng.random() + 1  # seeded rng keeps the test reproducible
if format == "csc":
A = A.tocsc(copy=False)
else:
A = A.tocsr(copy=False)
return A
rng = np.random.default_rng(1234)
A = random_triangle_matrix(n, lower=lower)
if choice_of_b == "floats":
b = rng.random((n, m))
elif choice_of_b == "ints":
b = rng.integers(-9, 9, (n, m))
elif choice_of_b == "complexints":
b = rng.integers(-9, 9, (n, m)) + rng.integers(-9, 9, (n, m)) * 1j
else:
raise ValueError(
"choice_of_b must be 'floats', 'ints', or 'complexints'.")
x = spsolve_triangular(A, b, lower=lower, unit_diagonal=unit_diagonal)
if unit_diagonal:
A.setdiag(1)
assert_allclose(A.dot(x), b, atol=1.5e-6)
@pytest.mark.thread_unsafe
@sup_sparse_efficiency
@pytest.mark.parametrize("nnz", [10, 10**2, 10**3])
@pytest.mark.parametrize("fmt", ["csr", "csc", "coo", "dia", "dok", "lil"])
def test_is_sptriangular_and_spbandwidth(nnz, fmt):
rng = np.random.default_rng(42)
N = nnz // 2
dens = 0.1
A = scipy.sparse.random_array((N, N), density=dens, format="csr", rng=rng)
A[1, 3] = A[3, 1] = 22 # ensure not upper or lower
A = A.asformat(fmt)
AU = scipy.sparse.triu(A, format=fmt)
AL = scipy.sparse.tril(A, format=fmt)
D = 0.1 * scipy.sparse.eye_array(N, format=fmt)
assert is_sptriangular(A) == (False, False)
assert is_sptriangular(AL) == (True, False)
assert is_sptriangular(AU) == (False, True)
assert is_sptriangular(D) == (True, True)
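    # `spbandwidth` returns a (below, above) pair of diagonal offsets and is
    # expected to agree with `scipy.linalg.bandwidth` on the dense counterpart.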
assert spbandwidth(A) == scipy.linalg.bandwidth(A.toarray())
assert spbandwidth(AU) == scipy.linalg.bandwidth(AU.toarray())
assert spbandwidth(AL) == scipy.linalg.bandwidth(AL.toarray())
assert spbandwidth(D) == scipy.linalg.bandwidth(D.toarray())


@ -0,0 +1,22 @@
"""
Sparse Eigenvalue Solvers
-------------------------
The submodules of sparse.linalg._eigen:
1. lobpcg: Locally Optimal Block Preconditioned Conjugate Gradient Method
"""
from .arpack import *
from .lobpcg import *
from ._svds import svds
from . import arpack
__all__ = [
'ArpackError', 'ArpackNoConvergence',
'eigs', 'eigsh', 'lobpcg', 'svds'
]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester


@ -0,0 +1,540 @@
import math
import numpy as np
from .arpack import _arpack # type: ignore[attr-defined]
from . import eigsh
from scipy._lib._util import check_random_state, _transition_to_rng
from scipy.sparse.linalg._interface import LinearOperator, aslinearoperator
from scipy.sparse.linalg._eigen.lobpcg import lobpcg # type: ignore[no-redef]
from scipy.sparse.linalg._svdp import _svdp
from scipy.linalg import svd
arpack_int = _arpack.timing.nbx.dtype
__all__ = ['svds']
def _herm(x):
return x.T.conj()
def _iv(A, k, ncv, tol, which, v0, maxiter,
return_singular, solver, rng):
# input validation/standardization for `solver`
# out of order because it's needed for other parameters
solver = str(solver).lower()
solvers = {"arpack", "lobpcg", "propack"}
if solver not in solvers:
raise ValueError(f"solver must be one of {solvers}.")
# input validation/standardization for `A`
A = aslinearoperator(A) # this takes care of some input validation
if not np.issubdtype(A.dtype, np.number):
message = "`A` must be of numeric data type."
raise ValueError(message)
if math.prod(A.shape) == 0:
message = "`A` must not be empty."
raise ValueError(message)
# input validation/standardization for `k`
kmax = min(A.shape) if solver == 'propack' else min(A.shape) - 1
if int(k) != k or not (0 < k <= kmax):
message = "`k` must be an integer satisfying `0 < k < min(A.shape)`."
raise ValueError(message)
k = int(k)
# input validation/standardization for `ncv`
if solver == "arpack" and ncv is not None:
if int(ncv) != ncv or not (k < ncv < min(A.shape)):
message = ("`ncv` must be an integer satisfying "
"`k < ncv < min(A.shape)`.")
raise ValueError(message)
ncv = int(ncv)
# input validation/standardization for `tol`
if tol < 0 or not np.isfinite(tol):
message = "`tol` must be a non-negative floating point value."
raise ValueError(message)
tol = float(tol)
# input validation/standardization for `which`
which = str(which).upper()
whichs = {'LM', 'SM'}
if which not in whichs:
raise ValueError(f"`which` must be in {whichs}.")
# input validation/standardization for `v0`
if v0 is not None:
v0 = np.atleast_1d(v0)
if not (np.issubdtype(v0.dtype, np.complexfloating)
or np.issubdtype(v0.dtype, np.floating)):
message = ("`v0` must be of floating or complex floating "
"data type.")
raise ValueError(message)
shape = (A.shape[0],) if solver == 'propack' else (min(A.shape),)
if v0.shape != shape:
message = f"`v0` must have shape {shape}."
raise ValueError(message)
# input validation/standardization for `maxiter`
if maxiter is not None and (int(maxiter) != maxiter or maxiter <= 0):
message = "`maxiter` must be a positive integer."
raise ValueError(message)
maxiter = int(maxiter) if maxiter is not None else maxiter
# input validation/standardization for `return_singular_vectors`
# not going to be flexible with this; too complicated for little gain
rs_options = {True, False, "vh", "u"}
if return_singular not in rs_options:
raise ValueError(f"`return_singular_vectors` must be in {rs_options}.")
rng = check_random_state(rng)
return (A, k, ncv, tol, which, v0, maxiter,
return_singular, solver, rng)
@_transition_to_rng("random_state", position_num=9)
def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
maxiter=None, return_singular_vectors=True,
solver='arpack', rng=None, options=None):
"""
Partial singular value decomposition of a sparse matrix.
Compute the largest or smallest `k` singular values and corresponding
singular vectors of a sparse matrix `A`. The order in which the singular
values are returned is not guaranteed.
In the descriptions below, let ``M, N = A.shape``.
Parameters
----------
A : ndarray, sparse matrix, or LinearOperator
Matrix to decompose of a floating point numeric dtype.
k : int, default: 6
Number of singular values and singular vectors to compute.
Must satisfy ``1 <= k <= kmax``, where ``kmax=min(M, N)`` for
``solver='propack'`` and ``kmax=min(M, N) - 1`` otherwise.
ncv : int, optional
When ``solver='arpack'``, this is the number of Lanczos vectors
generated. See :ref:`'arpack' <sparse.linalg.svds-arpack>` for details.
When ``solver='lobpcg'`` or ``solver='propack'``, this parameter is
ignored.
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
which : {'LM', 'SM'}
Which `k` singular values to find: either the largest magnitude ('LM')
or smallest magnitude ('SM') singular values.
v0 : ndarray, optional
The starting vector for iteration; see method-specific
documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
:ref:`'propack' <sparse.linalg.svds-propack>` for details.
maxiter : int, optional
Maximum number of iterations; see method-specific
documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
:ref:`'propack' <sparse.linalg.svds-propack>` for details.
return_singular_vectors : {True, False, "u", "vh"}
Singular values are always computed and returned; this parameter
controls the computation and return of singular vectors.
- ``True``: return singular vectors.
- ``False``: do not return singular vectors.
- ``"u"``: if ``M <= N``, compute only the left singular vectors and
return ``None`` for the right singular vectors. Otherwise, compute
all singular vectors.
- ``"vh"``: if ``M > N``, compute only the right singular vectors and
return ``None`` for the left singular vectors. Otherwise, compute
all singular vectors.
If ``solver='propack'``, the option is respected regardless of the
matrix shape.
solver : {'arpack', 'propack', 'lobpcg'}, optional
The solver used.
:ref:`'arpack' <sparse.linalg.svds-arpack>`,
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`, and
:ref:`'propack' <sparse.linalg.svds-propack>` are supported.
Default: `'arpack'`.
rng : `numpy.random.Generator`, optional
Pseudorandom number generator state. When `rng` is None, a new
`numpy.random.Generator` is created using entropy from the
operating system. Types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a ``Generator``.
options : dict, optional
A dictionary of solver-specific options. No solver-specific options
are currently supported; this parameter is reserved for future use.
Returns
-------
u : ndarray, shape=(M, k)
Unitary matrix having left singular vectors as columns.
s : ndarray, shape=(k,)
The singular values.
vh : ndarray, shape=(k, N)
Unitary matrix having right singular vectors as rows.
Notes
-----
    This is a naive implementation using ARPACK or LOBPCG as an eigensolver
    on the matrix ``A.conj().T @ A`` or ``A @ A.conj().T``, whichever is
    of smaller size, followed by the Rayleigh-Ritz method
    as postprocessing; see
Using the normal matrix, in Rayleigh-Ritz method, (2022, Nov. 19),
Wikipedia, https://w.wiki/4zms.
Alternatively, the PROPACK solver can be called.
    The choice of numeric dtype for the input matrix `A` may be limited.
    Only ``solver="lobpcg"`` supports all floating point dtypes:
    real ('np.float32', 'np.float64', 'np.longdouble') and
    complex ('np.complex64', 'np.complex128', 'np.clongdouble').
    ``solver="arpack"`` supports only
    'np.float32', 'np.float64', and 'np.complex128'.
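    As a brief illustration of the relation this implementation relies on,
    the singular values of a small dense ``A_small`` are the square roots
    of the eigenvalues of its normal matrix:
    >>> import numpy as np
    >>> A_small = np.array([[1., 0.], [1., 2.]])
    >>> evals = np.linalg.eigvalsh(A_small.T @ A_small)  # ascending
    >>> svals = np.linalg.svd(A_small, compute_uv=False)  # descending
    >>> np.allclose(np.sqrt(evals), svals[::-1])
    True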
Examples
--------
Construct a matrix `A` from singular values and vectors.
>>> import numpy as np
>>> from scipy import sparse, linalg, stats
>>> from scipy.sparse.linalg import svds, aslinearoperator, LinearOperator
Construct a dense matrix `A` from singular values and vectors.
>>> rng = np.random.default_rng(258265244568965474821194062361901728911)
>>> orthogonal = stats.ortho_group.rvs(10, random_state=rng)
>>> s = [1e-3, 1, 2, 3, 4] # non-zero singular values
>>> u = orthogonal[:, :5] # left singular vectors
>>> vT = orthogonal[:, 5:].T # right singular vectors
>>> A = u @ np.diag(s) @ vT
With only four singular values/vectors, the SVD approximates the original
matrix.
>>> u4, s4, vT4 = svds(A, k=4)
>>> A4 = u4 @ np.diag(s4) @ vT4
>>> np.allclose(A4, A, atol=1e-3)
True
With all five non-zero singular values/vectors, we can reproduce
the original matrix more accurately.
>>> u5, s5, vT5 = svds(A, k=5)
>>> A5 = u5 @ np.diag(s5) @ vT5
>>> np.allclose(A5, A)
True
The singular values match the expected singular values.
>>> np.allclose(s5, s)
True
Since the singular values are not close to each other in this example,
every singular vector matches as expected up to a difference in sign.
>>> (np.allclose(np.abs(u5), np.abs(u)) and
... np.allclose(np.abs(vT5), np.abs(vT)))
True
The singular vectors are also orthogonal.
>>> (np.allclose(u5.T @ u5, np.eye(5)) and
... np.allclose(vT5 @ vT5.T, np.eye(5)))
True
If there are (nearly) multiple singular values, the corresponding
individual singular vectors may be unstable, but the whole invariant
    subspace containing all such singular vectors is computed accurately,
    as can be measured by angles between subspaces via 'subspace_angles'.
>>> rng = np.random.default_rng(178686584221410808734965903901790843963)
>>> s = [1, 1 + 1e-6] # non-zero singular values
>>> u, _ = np.linalg.qr(rng.standard_normal((99, 2)))
>>> v, _ = np.linalg.qr(rng.standard_normal((99, 2)))
>>> vT = v.T
>>> A = u @ np.diag(s) @ vT
>>> A = A.astype(np.float32)
>>> u2, s2, vT2 = svds(A, k=2, rng=rng)
>>> np.allclose(s2, s)
True
    The angles between the individual exact and computed singular vectors
    may therefore not be small. To check, use:
>>> (linalg.subspace_angles(u2[:, :1], u[:, :1]) +
... linalg.subspace_angles(u2[:, 1:], u[:, 1:]))
array([0.06562513]) # may vary
>>> (linalg.subspace_angles(vT2[:1, :].T, vT[:1, :].T) +
... linalg.subspace_angles(vT2[1:, :].T, vT[1:, :].T))
array([0.06562507]) # may vary
    As opposed to the angles between the 2-dimensional invariant subspaces
    that these vectors span, which are small for the left singular vectors
>>> linalg.subspace_angles(u2, u).sum() < 1e-6
True
    as well as for the right singular vectors.
>>> linalg.subspace_angles(vT2.T, vT.T).sum() < 1e-6
True
The next example follows that of 'sklearn.decomposition.TruncatedSVD'.
>>> rng = np.random.default_rng(0)
>>> X_dense = rng.random(size=(100, 100))
>>> X_dense[:, 2 * np.arange(50)] = 0
>>> X = sparse.csr_array(X_dense)
>>> _, singular_values, _ = svds(X, k=5, rng=rng)
>>> print(singular_values)
[ 4.3221... 4.4043... 4.4907... 4.5858... 35.4549...]
The function can be called without the transpose of the input matrix
ever explicitly constructed.
>>> rng = np.random.default_rng(102524723947864966825913730119128190974)
>>> G = sparse.random_array((8, 9), density=0.5, rng=rng)
>>> Glo = aslinearoperator(G)
>>> _, singular_values_svds, _ = svds(Glo, k=5, rng=rng)
>>> _, singular_values_svd, _ = linalg.svd(G.toarray())
>>> np.allclose(singular_values_svds, singular_values_svd[-4::-1])
True
    The most memory-efficient scenario is one where neither
    the original matrix nor its transpose is explicitly constructed.
    Our example computes the smallest singular values and vectors
    of a 'LinearOperator' constructed from the numpy function 'np.diff',
    applied column-wise to be consistent with 'LinearOperator' operating
    on columns.
>>> diff0 = lambda a: np.diff(a, axis=0)
Let us create the matrix from 'diff0' to be used for validation only.
>>> n = 5 # The dimension of the space.
>>> M_from_diff0 = diff0(np.eye(n))
>>> print(M_from_diff0.astype(int))
[[-1 1 0 0 0]
[ 0 -1 1 0 0]
[ 0 0 -1 1 0]
[ 0 0 0 -1 1]]
    The matrix 'M_from_diff0' is bi-diagonal and could alternatively be
    created directly by
>>> M = - np.eye(n - 1, n, dtype=int)
>>> np.fill_diagonal(M[:,1:], 1)
>>> np.allclose(M, M_from_diff0)
True
Its transpose
>>> print(M.T)
[[-1 0 0 0]
[ 1 -1 0 0]
[ 0 1 -1 0]
[ 0 0 1 -1]
[ 0 0 0 1]]
    can be viewed as the incidence matrix of a linear graph with
    5 vertices and 4 edges; see
    Incidence matrix, (2022, Nov. 19), Wikipedia, https://w.wiki/5YXU.
    The 5x5 normal matrix ``M.T @ M`` thus is
>>> print(M.T @ M)
[[ 1 -1 0 0 0]
[-1 2 -1 0 0]
[ 0 -1 2 -1 0]
[ 0 0 -1 2 -1]
[ 0 0 0 -1 1]]
    the graph Laplacian, while the smaller 4x4 normal matrix
    ``M @ M.T`` actually used in 'svds'
>>> print(M @ M.T)
[[ 2 -1 0 0]
[-1 2 -1 0]
[ 0 -1 2 -1]
[ 0 0 -1 2]]
is the so-called edge-based Laplacian; see
Symmetric Laplacian via the incidence matrix, in Laplacian matrix,
(2022, Nov. 19), Wikipedia, https://w.wiki/5YXW.
    The 'LinearOperator' setup needs the options 'rmatvec' and 'rmatmat'
    for multiplication by the matrix transpose ``M.T``, but we want to stay
    matrix-free to save memory, so, knowing what ``M.T`` looks like, we
    manually construct the following function to be
    used in ``rmatmat=diff0t``.
>>> def diff0t(a):
... if a.ndim == 1:
... a = a[:,np.newaxis] # Turn 1D into 2D array
... d = np.zeros((a.shape[0] + 1, a.shape[1]), dtype=a.dtype)
... d[0, :] = - a[0, :]
... d[1:-1, :] = a[0:-1, :] - a[1:, :]
... d[-1, :] = a[-1, :]
... return d
We check that our function 'diff0t' for the matrix transpose is valid.
>>> np.allclose(M.T, diff0t(np.eye(n-1)))
True
    Now we set up our matrix-free 'LinearOperator' called 'diff0_func_aslo'
and for validation the matrix-based 'diff0_matrix_aslo'.
>>> def diff0_func_aslo_def(n):
... return LinearOperator(matvec=diff0,
... matmat=diff0,
... rmatvec=diff0t,
... rmatmat=diff0t,
... shape=(n - 1, n))
>>> diff0_func_aslo = diff0_func_aslo_def(n)
>>> diff0_matrix_aslo = aslinearoperator(M_from_diff0)
And validate both the matrix and its transpose in 'LinearOperator'.
>>> np.allclose(diff0_func_aslo(np.eye(n)),
... diff0_matrix_aslo(np.eye(n)))
True
>>> np.allclose(diff0_func_aslo.T(np.eye(n-1)),
... diff0_matrix_aslo.T(np.eye(n-1)))
True
Having the 'LinearOperator' setup validated, we run the solver.
>>> n = 100
>>> diff0_func_aslo = diff0_func_aslo_def(n)
>>> u, s, vT = svds(diff0_func_aslo, k=3, which='SM')
The singular values squared and the singular vectors are known
explicitly; see
Pure Dirichlet boundary conditions, in
Eigenvalues and eigenvectors of the second derivative,
(2022, Nov. 19), Wikipedia, https://w.wiki/5YX6,
    since 'diff' corresponds to the first
    derivative, and its smaller size ``(n-1) x (n-1)`` normal matrix
    ``M @ M.T`` represents the discrete second derivative with Dirichlet
    boundary conditions. We use these analytic expressions for validation.
>>> se = 2. * np.sin(np.pi * np.arange(1, 4) / (2. * n))
>>> ue = np.sqrt(2 / n) * np.sin(np.pi * np.outer(np.arange(1, n),
... np.arange(1, 4)) / n)
>>> np.allclose(s, se, atol=1e-3)
True
>>> np.allclose(np.abs(u), np.abs(ue), atol=1e-6)
True
"""
args = _iv(A, k, ncv, tol, which, v0, maxiter, return_singular_vectors,
solver, rng)
(A, k, ncv, tol, which, v0, maxiter,
return_singular_vectors, solver, rng) = args
largest = (which == 'LM')
n, m = A.shape
if n >= m:
X_dot = A.matvec
X_matmat = A.matmat
XH_dot = A.rmatvec
XH_mat = A.rmatmat
transpose = False
else:
X_dot = A.rmatvec
X_matmat = A.rmatmat
XH_dot = A.matvec
XH_mat = A.matmat
transpose = True
dtype = getattr(A, 'dtype', None)
if dtype is None:
dtype = A.dot(np.zeros([m, 1])).dtype
def matvec_XH_X(x):
return XH_dot(X_dot(x))
def matmat_XH_X(x):
return XH_mat(X_matmat(x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
matmat=matmat_XH_X,
shape=(min(A.shape), min(A.shape)))
# Get a low rank approximation of the implicitly defined gramian matrix.
# This is not a stable way to approach the problem.
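    # (Forming the normal matrix squares the condition number, so singular
    # values smaller than roughly sqrt(eps) times the largest one cannot be
    # resolved accurately.)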
if solver == 'lobpcg':
if k == 1 and v0 is not None:
X = np.reshape(v0, (-1, 1))
else:
X = rng.standard_normal(size=(min(A.shape), k))
_, eigvec = lobpcg(XH_X, X, tol=tol ** 2, maxiter=maxiter,
largest=largest)
elif solver == 'propack':
jobu = return_singular_vectors in {True, 'u'}
jobv = return_singular_vectors in {True, 'vh'}
irl_mode = (which == 'SM')
res = _svdp(A, k=k, tol=tol**2, which=which, maxiter=None,
compute_u=jobu, compute_v=jobv, irl_mode=irl_mode,
kmax=maxiter, v0=v0, rng=rng)
u, s, vh, _ = res # but we'll ignore bnd, the last output
# PROPACK order appears to be largest first. `svds` output order is not
# guaranteed, according to documentation, but for ARPACK and LOBPCG
# they actually are ordered smallest to largest, so reverse for
# consistency.
s = s[::-1]
u = u[:, ::-1]
vh = vh[::-1]
u = u if jobu else None
vh = vh if jobv else None
if return_singular_vectors:
return u, s, vh
else:
return s
elif solver == 'arpack' or solver is None:
if v0 is None:
v0 = rng.standard_normal(size=(min(A.shape),))
_, eigvec = eigsh(XH_X, k=k, tol=tol ** 2, maxiter=maxiter,
ncv=ncv, which=which, v0=v0)
        # ARPACK does not guarantee exactly orthonormal eigenvectors
        # for clustered eigenvalues, especially in complex arithmetic
        eigvec, _ = np.linalg.qr(eigvec)
        # the eigenvectors eigvec must be orthonormal here; see gh-16712
Av = X_matmat(eigvec)
if not return_singular_vectors:
s = svd(Av, compute_uv=False, overwrite_a=True)
return s[::-1]
# compute the left singular vectors of X and update the right ones
# accordingly
u, s, vh = svd(Av, full_matrices=False, overwrite_a=True)
u = u[:, ::-1]
s = s[::-1]
vh = vh[::-1]
jobu = return_singular_vectors in {True, 'u'}
jobv = return_singular_vectors in {True, 'vh'}
if transpose:
u_tmp = eigvec @ _herm(vh) if jobu else None
vh = _herm(u) if jobv else None
u = u_tmp
else:
if not jobu:
u = None
vh = vh @ _herm(eigvec) if jobv else None
return u, s, vh


@ -0,0 +1,382 @@
def _svds_arpack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
maxiter=None, return_singular_vectors=True,
solver='arpack', rng=None):
"""
Partial singular value decomposition of a sparse matrix using ARPACK.
Compute the largest or smallest `k` singular values and corresponding
singular vectors of a sparse matrix `A`. The order in which the singular
values are returned is not guaranteed.
In the descriptions below, let ``M, N = A.shape``.
Parameters
----------
A : sparse matrix or LinearOperator
Matrix to decompose.
k : int, optional
Number of singular values and singular vectors to compute.
Must satisfy ``1 <= k <= min(M, N) - 1``.
Default is 6.
ncv : int, optional
The number of Lanczos vectors generated.
The default is ``min(n, max(2*k + 1, 20))``.
If specified, must satisfy ``k + 1 < ncv < min(M, N)``; ``ncv > 2*k``
is recommended.
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
which : {'LM', 'SM'}
Which `k` singular values to find: either the largest magnitude ('LM')
or smallest magnitude ('SM') singular values.
v0 : ndarray, optional
The starting vector for iteration:
an (approximate) left singular vector if ``N > M`` and a right singular
vector otherwise. Must be of length ``min(M, N)``.
Default: random
maxiter : int, optional
Maximum number of Arnoldi update iterations allowed;
default is ``min(M, N) * 10``.
return_singular_vectors : {True, False, "u", "vh"}
Singular values are always computed and returned; this parameter
controls the computation and return of singular vectors.
- ``True``: return singular vectors.
- ``False``: do not return singular vectors.
- ``"u"``: if ``M <= N``, compute only the left singular vectors and
return ``None`` for the right singular vectors. Otherwise, compute
all singular vectors.
- ``"vh"``: if ``M > N``, compute only the right singular vectors and
return ``None`` for the left singular vectors. Otherwise, compute
all singular vectors.
solver : {'arpack', 'propack', 'lobpcg'}, optional
This is the solver-specific documentation for ``solver='arpack'``.
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>` and
:ref:`'propack' <sparse.linalg.svds-propack>`
are also supported.
rng : `numpy.random.Generator`, optional
Pseudorandom number generator state. When `rng` is None, a new
`numpy.random.Generator` is created using entropy from the
operating system. Types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a ``Generator``.
options : dict, optional
A dictionary of solver-specific options. No solver-specific options
are currently supported; this parameter is reserved for future use.
Returns
-------
u : ndarray, shape=(M, k)
Unitary matrix having left singular vectors as columns.
s : ndarray, shape=(k,)
The singular values.
vh : ndarray, shape=(k, N)
Unitary matrix having right singular vectors as rows.
Notes
-----
This is a naive implementation using ARPACK as an eigensolver
on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
efficient.
Examples
--------
Construct a matrix ``A`` from singular values and vectors.
>>> import numpy as np
>>> from scipy.stats import ortho_group
>>> from scipy.sparse import csc_array, diags_array
>>> from scipy.sparse.linalg import svds
>>> rng = np.random.default_rng()
>>> orthogonal = csc_array(ortho_group.rvs(10, random_state=rng))
>>> s = [0.0001, 0.001, 3, 4, 5] # singular values
>>> u = orthogonal[:, :5] # left singular vectors
>>> vT = orthogonal[:, 5:].T # right singular vectors
>>> A = u @ diags_array(s) @ vT
With only three singular values/vectors, the SVD approximates the original
matrix.
>>> u2, s2, vT2 = svds(A, k=3, solver='arpack')
>>> A2 = u2 @ np.diag(s2) @ vT2
>>> np.allclose(A2, A.toarray(), atol=1e-3)
True
With all five singular values/vectors, we can reproduce the original
matrix.
>>> u3, s3, vT3 = svds(A, k=5, solver='arpack')
>>> A3 = u3 @ np.diag(s3) @ vT3
>>> np.allclose(A3, A.toarray())
True
The singular values match the expected singular values, and the singular
vectors are as expected up to a difference in sign.
>>> (np.allclose(s3, s) and
... np.allclose(np.abs(u3), np.abs(u.toarray())) and
... np.allclose(np.abs(vT3), np.abs(vT.toarray())))
True
The singular vectors are also orthogonal.
>>> (np.allclose(u3.T @ u3, np.eye(5)) and
... np.allclose(vT3 @ vT3.T, np.eye(5)))
True
"""
pass
def _svds_lobpcg_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
maxiter=None, return_singular_vectors=True,
solver='lobpcg', rng=None):
"""
Partial singular value decomposition of a sparse matrix using LOBPCG.
Compute the largest or smallest `k` singular values and corresponding
singular vectors of a sparse matrix `A`. The order in which the singular
values are returned is not guaranteed.
In the descriptions below, let ``M, N = A.shape``.
Parameters
----------
A : sparse matrix or LinearOperator
Matrix to decompose.
k : int, default: 6
Number of singular values and singular vectors to compute.
Must satisfy ``1 <= k <= min(M, N) - 1``.
ncv : int, optional
Ignored.
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
which : {'LM', 'SM'}
Which `k` singular values to find: either the largest magnitude ('LM')
or smallest magnitude ('SM') singular values.
v0 : ndarray, optional
If `k` is 1, the starting vector for iteration:
an (approximate) left singular vector if ``N > M`` and a right singular
vector otherwise. Must be of length ``min(M, N)``.
Ignored otherwise.
Default: random
maxiter : int, default: 20
Maximum number of iterations.
return_singular_vectors : {True, False, "u", "vh"}
Singular values are always computed and returned; this parameter
controls the computation and return of singular vectors.
- ``True``: return singular vectors.
- ``False``: do not return singular vectors.
- ``"u"``: if ``M <= N``, compute only the left singular vectors and
return ``None`` for the right singular vectors. Otherwise, compute
all singular vectors.
- ``"vh"``: if ``M > N``, compute only the right singular vectors and
return ``None`` for the left singular vectors. Otherwise, compute
all singular vectors.
solver : {'arpack', 'propack', 'lobpcg'}, optional
This is the solver-specific documentation for ``solver='lobpcg'``.
:ref:`'arpack' <sparse.linalg.svds-arpack>` and
:ref:`'propack' <sparse.linalg.svds-propack>`
are also supported.
rng : `numpy.random.Generator`, optional
Pseudorandom number generator state. When `rng` is None, a new
`numpy.random.Generator` is created using entropy from the
operating system. Types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a ``Generator``.
options : dict, optional
A dictionary of solver-specific options. No solver-specific options
are currently supported; this parameter is reserved for future use.
Returns
-------
u : ndarray, shape=(M, k)
Unitary matrix having left singular vectors as columns.
s : ndarray, shape=(k,)
The singular values.
vh : ndarray, shape=(k, N)
Unitary matrix having right singular vectors as rows.
Notes
-----
This is a naive implementation using LOBPCG as an eigensolver
on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
efficient.
Examples
--------
Construct a matrix ``A`` from singular values and vectors.
>>> import numpy as np
>>> from scipy.stats import ortho_group
>>> from scipy.sparse import csc_array, diags_array
>>> from scipy.sparse.linalg import svds
>>> rng = np.random.default_rng()
>>> orthogonal = csc_array(ortho_group.rvs(10, random_state=rng))
>>> s = [0.0001, 0.001, 3, 4, 5] # singular values
>>> u = orthogonal[:, :5] # left singular vectors
>>> vT = orthogonal[:, 5:].T # right singular vectors
>>> A = u @ diags_array(s) @ vT
With only three singular values/vectors, the SVD approximates the original
matrix.
>>> u2, s2, vT2 = svds(A, k=3, solver='lobpcg')
>>> A2 = u2 @ np.diag(s2) @ vT2
>>> np.allclose(A2, A.toarray(), atol=1e-3)
True
With all five singular values/vectors, we can reproduce the original
matrix.
>>> u3, s3, vT3 = svds(A, k=5, solver='lobpcg')
>>> A3 = u3 @ np.diag(s3) @ vT3
>>> np.allclose(A3, A.toarray())
True
The singular values match the expected singular values, and the singular
vectors are as expected up to a difference in sign.
>>> (np.allclose(s3, s) and
... np.allclose(np.abs(u3), np.abs(u.todense())) and
... np.allclose(np.abs(vT3), np.abs(vT.todense())))
True
The singular vectors are also orthogonal.
>>> (np.allclose(u3.T @ u3, np.eye(5)) and
... np.allclose(vT3 @ vT3.T, np.eye(5)))
True
"""
pass
def _svds_propack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
maxiter=None, return_singular_vectors=True,
solver='propack', rng=None):
"""
Partial singular value decomposition of a sparse matrix using PROPACK.
Compute the largest or smallest `k` singular values and corresponding
singular vectors of a sparse matrix `A`. The order in which the singular
values are returned is not guaranteed.
In the descriptions below, let ``M, N = A.shape``.
Parameters
----------
A : sparse matrix or LinearOperator
Matrix to decompose. If `A` is a ``LinearOperator``
object, it must define both ``matvec`` and ``rmatvec`` methods.
k : int, default: 6
Number of singular values and singular vectors to compute.
Must satisfy ``1 <= k <= min(M, N)``.
ncv : int, optional
Ignored.
tol : float, optional
The desired relative accuracy for computed singular values.
Zero (default) means machine precision.
which : {'LM', 'SM'}
Which `k` singular values to find: either the largest magnitude ('LM')
or smallest magnitude ('SM') singular values. Note that choosing
        ``which='SM'`` will force the ``irl`` option to be set to ``True``.
v0 : ndarray, optional
Starting vector for iterations: must be of length ``A.shape[0]``.
If not specified, PROPACK will generate a starting vector.
maxiter : int, optional
Maximum number of iterations / maximal dimension of the Krylov
subspace. Default is ``10 * k``.
return_singular_vectors : {True, False, "u", "vh"}
Singular values are always computed and returned; this parameter
controls the computation and return of singular vectors.
- ``True``: return singular vectors.
- ``False``: do not return singular vectors.
- ``"u"``: compute only the left singular vectors; return ``None`` for
the right singular vectors.
- ``"vh"``: compute only the right singular vectors; return ``None``
for the left singular vectors.
solver : {'arpack', 'propack', 'lobpcg'}, optional
This is the solver-specific documentation for ``solver='propack'``.
:ref:`'arpack' <sparse.linalg.svds-arpack>` and
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`
are also supported.
rng : `numpy.random.Generator`, optional
Pseudorandom number generator state. When `rng` is None, a new
`numpy.random.Generator` is created using entropy from the
operating system. Types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a ``Generator``.
options : dict, optional
A dictionary of solver-specific options. No solver-specific options
are currently supported; this parameter is reserved for future use.
Returns
-------
u : ndarray, shape=(M, k)
Unitary matrix having left singular vectors as columns.
s : ndarray, shape=(k,)
The singular values.
vh : ndarray, shape=(k, N)
Unitary matrix having right singular vectors as rows.
Notes
-----
This is an interface to the Fortran library PROPACK [1]_.
The current default is to run with IRL mode disabled unless seeking the
smallest singular values/vectors (``which='SM'``).
References
----------
.. [1] Larsen, Rasmus Munk. "PROPACK-Software for large and sparse SVD
calculations." Available online. URL
http://sun.stanford.edu/~rmunk/PROPACK (2004): 2008-2009.
Examples
--------
Construct a matrix ``A`` from singular values and vectors.
>>> import numpy as np
>>> from scipy.stats import ortho_group
>>> from scipy.sparse import csc_array, diags_array
>>> from scipy.sparse.linalg import svds
>>> rng = np.random.default_rng()
>>> orthogonal = csc_array(ortho_group.rvs(10, random_state=rng))
>>> s = [0.0001, 0.001, 3, 4, 5] # singular values
>>> u = orthogonal[:, :5] # left singular vectors
>>> vT = orthogonal[:, 5:].T # right singular vectors
>>> A = u @ diags_array(s) @ vT
With only three singular values/vectors, the SVD approximates the original
matrix.
>>> u2, s2, vT2 = svds(A, k=3, solver='propack')
>>> A2 = u2 @ np.diag(s2) @ vT2
>>> np.allclose(A2, A.todense(), atol=1e-3)
True
With all five singular values/vectors, we can reproduce the original
matrix.
>>> u3, s3, vT3 = svds(A, k=5, solver='propack')
>>> A3 = u3 @ np.diag(s3) @ vT3
>>> np.allclose(A3, A.todense())
True
The singular values match the expected singular values, and the singular
vectors are as expected up to a difference in sign.
>>> (np.allclose(s3, s) and
... np.allclose(np.abs(u3), np.abs(u.toarray())) and
... np.allclose(np.abs(vT3), np.abs(vT.toarray())))
True
The singular vectors are also orthogonal.
>>> (np.allclose(u3.T @ u3, np.eye(5)) and
... np.allclose(vT3 @ vT3.T, np.eye(5)))
True
"""
pass


@ -0,0 +1,45 @@
BSD Software License
Pertains to ARPACK and P_ARPACK
Copyright (c) 1996-2008 Rice University.
Developed by D.C. Sorensen, R.B. Lehoucq, C. Yang, and K. Maschhoff.
All rights reserved.
Arpack has been renamed to arpack-ng.
Copyright (c) 2001-2011 - Scilab Enterprises
Updated by Allan Cornet, Sylvestre Ledru.
Copyright (c) 2010 - Jordi Gutiérrez Hermoso (Octave patch)
Copyright (c) 2007 - Sébastien Fabbro (gentoo patch)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer listed
in this license in the documentation and/or other materials
provided with the distribution.
- Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -0,0 +1,20 @@
"""
Eigenvalue solver using iterative methods.
Find k eigenvectors and eigenvalues of a matrix A using the
Arnoldi/Lanczos iterative methods from ARPACK [1]_, [2]_.
These methods are most useful for large sparse matrices.
- eigs(A,k)
- eigsh(A,k)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
from .arpack import *

File diff suppressed because it is too large.


@ -0,0 +1,717 @@
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import threading
import itertools
import numpy as np
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from pytest import raises as assert_raises
import pytest
from numpy import dot, conj, random
from scipy.linalg import eig, eigh
from scipy.sparse import csc_array, csr_array, diags_array, random_array
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg._eigen.arpack import (eigs, eigsh, arpack,
ArpackNoConvergence)
from scipy._lib._gcutils import assert_deallocated, IS_PYPY
# precision for tests
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None, D_type=None, which=None):
"""
    Return tolerance values suitable for a given test.
    Parameters
    ----------
    type_char : {'f', 'd', 'F', 'D'}
        Data type in ARPACK eigenvalue problem
    mattype : {csr_array, aslinearoperator, asarray}, optional
        Linear operator type
    D_type : dict, optional
        The test-case dictionary; used to widen tolerances for some
        known-hard generalized Hermitian problems
    which : str, optional
        The ARPACK ``which`` argument used in the test
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if (
isinstance(mattype, type) and issubclass(mattype, csr_array)
and type_char in ('f', 'F')
):
# sparse in single precision: worse errors
rtol *= 5
if (
which in ('LM', 'SM', 'LA')
and D_type.name == "gen-hermitian-Mc"
):
if type_char == 'F':
# missing case 1, 2, and more, from PR 14798
rtol *= 5
if type_char == 'D':
# missing more cases, from PR 14798
rtol *= 10
atol *= 10
return tol, rtol, atol
def generate_matrix(N, complex_=False, hermitian=False,
pos_definite=False, sparse=False, rng=None):
M = rng.random((N, N))
if complex_:
M = M + 1j * rng.random((N, N))
if hermitian:
if pos_definite:
if sparse:
i = np.arange(N)
j = rng.randint(N, size=N-2)
i, j = np.meshgrid(i, j)
M[i, j] = 0
M = np.dot(M.conj(), M.T)
else:
M = np.dot(M.conj(), M.T)
if sparse:
i = rng.randint(N, size=N * N // 4)
j = rng.randint(N, size=N * N // 4)
ind = np.nonzero(i == j)
j[ind] = (j[ind] + 1) % N
M[i, j] = 0
M[j, i] = 0
else:
if sparse:
i = rng.randint(N, size=N * N // 2)
j = rng.randint(N, size=N * N // 2)
M[i, j] = 0
return M
def generate_matrix_symmetric(N, pos_definite=False, sparse=False, rng=None):
M = rng.random((N, N))
M = 0.5 * (M + M.T) # Make M symmetric
if pos_definite:
Id = N * np.eye(N)
if sparse:
M = csr_array(M)
M += Id
else:
if sparse:
M = csr_array(M)
return M
def assert_allclose_cc(actual, desired, **kw):
"""Almost equal or complex conjugates almost equal"""
try:
assert_allclose(actual, desired, **kw)
except AssertionError:
assert_allclose(actual, conj(desired), **kw)
def argsort_which(eigenvalues, typ, k, which,
sigma=None, OPpart=None, mode=None):
"""Return sorted indices of eigenvalues using the "which" keyword
from eigs and eigsh"""
if sigma is None:
reval = np.round(eigenvalues, decimals=_ndigits[typ])
else:
if mode is None or mode == 'normal':
if OPpart is None:
reval = 1. / (eigenvalues - sigma)
elif OPpart == 'r':
reval = 0.5 * (1. / (eigenvalues - sigma)
+ 1. / (eigenvalues - np.conj(sigma)))
elif OPpart == 'i':
reval = -0.5j * (1. / (eigenvalues - sigma)
- 1. / (eigenvalues - np.conj(sigma)))
elif mode == 'cayley':
reval = (eigenvalues + sigma) / (eigenvalues - sigma)
elif mode == 'buckling':
reval = eigenvalues / (eigenvalues - sigma)
else:
raise ValueError(f"mode='{mode}' not recognized")
reval = np.round(reval, decimals=_ndigits[typ])
if which in ['LM', 'SM']:
ind = np.argsort(abs(reval))
elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
ind = np.argsort(np.real(reval))
elif which in ['LI', 'SI']:
            # for LI, SI ARPACK returns largest, smallest abs(imaginary) - why?
if typ.islower():
ind = np.argsort(abs(np.imag(reval)))
else:
ind = np.argsort(np.imag(reval))
else:
raise ValueError(f"which='{which}' is unrecognized")
if which in ['LM', 'LA', 'LR', 'LI']:
return ind[-k:]
elif which in ['SM', 'SA', 'SR', 'SI']:
return ind[:k]
elif which == 'BE':
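        # 'BE' = both ends: half from the smallest end, the rest from the
        # largest end of the spectrum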
return np.concatenate((ind[:k//2], ind[k//2-k:]))
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
mattype=np.asarray, OPpart=None, mode='normal'):
general = ('bmat' in d)
if symmetric:
eigs_func = eigsh
else:
eigs_func = eigs
if general:
err = (f"error for {eigs_func.__name__}:general, typ={typ}, which={which}, "
f"sigma={sigma}, mattype={mattype.__name__},"
f" OPpart={OPpart}, mode={mode}")
else:
err = (f"error for {eigs_func.__name__}:standard, typ={typ}, which={which}, "
f"sigma={sigma}, mattype={mattype.__name__}, "
f"OPpart={OPpart}, mode={mode}")
a = d['mat'].astype(typ)
ac = mattype(a)
if general:
b = d['bmat'].astype(typ)
bc = mattype(b)
# get exact eigenvalues
exact_eval = d['eval'].astype(typ.upper())
ind = argsort_which(exact_eval, typ, k, which,
sigma, OPpart, mode)
exact_eval = exact_eval[ind]
# compute arpack eigenvalues
kwargs = dict(which=which, v0=v0, sigma=sigma)
if eigs_func is eigsh:
kwargs['mode'] = mode
else:
kwargs['OPpart'] = OPpart
# compute suitable tolerances
kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype, d, which)
# on rare occasions, ARPACK routines return results that are proper
# eigenvalues and -vectors, but not necessarily the ones requested in
# the parameter which. This is inherent to the Krylov methods, and
# should not be treated as a failure. If such a rare situation
# occurs, the calculation is tried again (but at most a few times).
ntries = 0
while ntries < 5:
# solve
if general:
try:
eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
else:
try:
eigenvalues, evec = eigs_func(ac, k, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eigenvalues, evec = eigs_func(ac, k, **kwargs)
ind = argsort_which(eigenvalues, typ, k, which,
sigma, OPpart, mode)
eigenvalues = eigenvalues[ind]
evec = evec[:, ind]
try:
# check eigenvalues
assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol,
err_msg=err)
check_evecs = True
except AssertionError:
check_evecs = False
ntries += 1
if check_evecs:
# check eigenvectors
LHS = np.dot(a, evec)
if general:
RHS = eigenvalues * np.dot(b, evec)
else:
RHS = eigenvalues * evec
assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
break
# check eigenvalues
assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, err_msg=err)
class DictWithRepr(dict):
def __init__(self, name):
self.name = name
def __repr__(self):
return f"<{self.name}>"
class SymmetricParams:
def __init__(self):
self.eigs = eigsh
self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
self.mattypes = [csr_array, aslinearoperator, np.asarray]
self.sigmas_modes = {None: ['normal'],
0.5: ['normal', 'buckling', 'cayley']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
rng = np.random.RandomState(2300)
Ar = generate_matrix(N, hermitian=True,
pos_definite=True,
rng=rng).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True,
rng=rng).astype('f').astype('d')
Ac = generate_matrix(N, hermitian=True, pos_definite=True,
complex_=True, rng=rng).astype('F').astype('D')
Mc = generate_matrix(N, hermitian=True, pos_definite=True,
complex_=True, rng=rng).astype('F').astype('D')
v0 = rng.random(N)
# standard symmetric problem
SS = DictWithRepr("std-symmetric")
SS['mat'] = Ar
SS['v0'] = v0
SS['eval'] = eigh(SS['mat'], eigvals_only=True)
# general symmetric problem
GS = DictWithRepr("gen-symmetric")
GS['mat'] = Ar
GS['bmat'] = M
GS['v0'] = v0
GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
# standard hermitian problem
SH = DictWithRepr("std-hermitian")
SH['mat'] = Ac
SH['v0'] = v0
SH['eval'] = eigh(SH['mat'], eigvals_only=True)
# general hermitian problem
GH = DictWithRepr("gen-hermitian")
GH['mat'] = Ac
GH['bmat'] = M
GH['v0'] = v0
GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
# general hermitian problem with hermitian M
GHc = DictWithRepr("gen-hermitian-Mc")
GHc['mat'] = Ac
GHc['bmat'] = Mc
GHc['v0'] = v0
GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True)
self.real_test_cases = [SS, GS]
self.complex_test_cases = [SH, GH, GHc]
class NonSymmetricParams:
def __init__(self):
self.eigs = eigs
self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI']
self.mattypes = [csr_array, aslinearoperator, np.asarray]
self.sigmas_OPparts = {None: [None],
0.1: ['r'],
0.1 + 0.1j: ['r', 'i']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
rng = np.random.RandomState(2300)
Ar = generate_matrix(N, rng=rng).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True, rng=rng).astype('f').astype('d')
Ac = generate_matrix(N, complex_=True, rng=rng).astype('F').astype('D')
v0 = rng.random(N)
# standard real nonsymmetric problem
SNR = DictWithRepr("std-real-nonsym")
SNR['mat'] = Ar
SNR['v0'] = v0
SNR['eval'] = eig(SNR['mat'], left=False, right=False)
# general real nonsymmetric problem
GNR = DictWithRepr("gen-real-nonsym")
GNR['mat'] = Ar
GNR['bmat'] = M
GNR['v0'] = v0
GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
# standard complex nonsymmetric problem
SNC = DictWithRepr("std-cmplx-nonsym")
SNC['mat'] = Ac
SNC['v0'] = v0
SNC['eval'] = eig(SNC['mat'], left=False, right=False)
# general complex nonsymmetric problem
GNC = DictWithRepr("gen-cmplx-nonsym")
GNC['mat'] = Ac
GNC['bmat'] = M
GNC['v0'] = v0
GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
self.real_test_cases = [SNR, GNR]
self.complex_test_cases = [SNC, GNC]
@pytest.mark.iterations(1)
@pytest.mark.thread_unsafe
def test_symmetric_modes(num_parallel_threads):
assert num_parallel_threads == 1
params = SymmetricParams()
k = 2
symmetric = True
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for (sigma, modes) in params.sigmas_modes.items():
for mode in modes:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype, None, mode)
def test_hermitian_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.complex_test_cases:
for typ in 'FD':
for which in params.which:
if which == 'BE':
continue # BE invalid for complex
for mattype in params.mattypes:
for sigma in params.sigmas_modes:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype)
def test_symmetric_starting_vector():
params = SymmetricParams()
symmetric = True
for k in [1, 2, 3, 4, 5]:
for D in params.real_test_cases:
for typ in 'fd':
v0 = random.rand(len(D['v0'])).astype(typ)
eval_evec(symmetric, D, typ, k, 'LM', v0)
def test_symmetric_no_convergence():
rng = np.random.RandomState(1234)
m = generate_matrix(30, hermitian=True, pos_definite=True, rng=rng)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case") from err
w, v = err.eigenvalues, err.eigenvectors
assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
def test_real_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for sigma, OPparts in params.sigmas_OPparts.items():
for OPpart in OPparts:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype, OPpart)
def test_complex_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.complex_test_cases:
for typ in 'DF':
for which in params.which:
for mattype in params.mattypes:
for sigma in params.sigmas_OPparts:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype)
def test_standard_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
def test_general_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
def test_standard_nonsymmetric_no_convergence():
rng = np.random.RandomState(1234)
m = generate_matrix(30, complex_=True, rng=rng)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case") from err
w, v = err.eigenvalues, err.eigenvectors
for ww, vv in zip(w, v.T):
assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
def test_eigen_bad_shapes():
# A is not square.
A = csc_array(np.zeros((2, 3)))
assert_raises(ValueError, eigs, A)
def test_eigen_bad_kwargs():
# Test eigen on wrong keyword argument
A = csc_array(np.zeros((8, 8)))
assert_raises(ValueError, eigs, A, which='XX')
def test_ticket_1459_arpack_crash():
for dtype in [np.float32, np.float64]:
# This test does not seem to catch the issue for float32,
# but we made the same fix there, just to be sure
N = 6
k = 2
np.random.seed(2301)
A = np.random.random((N, N)).astype(dtype)
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
-0.34365925382227402451, 0.46122533684552280420,
-0.58001341115969040629, -0.78844877570084292984e-01],
dtype=dtype)
# Should not crash:
evals, evecs = eigs(A, k, v0=v0)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_linearoperator_deallocation():
# Check that the linear operators used by the Arpack wrappers are
# deallocatable by reference counting -- they are big objects, so
# Python's cyclic GC may not collect them fast enough before
# running out of memory if eigs/eigsh are called in a tight loop.
M_d = np.eye(10)
M_s = csc_array(M_d)
M_o = aslinearoperator(M_d)
with assert_deallocated(lambda: arpack.SpLuInv(M_s)):
pass
with assert_deallocated(lambda: arpack.LuInv(M_d)):
pass
with assert_deallocated(lambda: arpack.IterInv(M_s)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)):
pass
@pytest.mark.thread_unsafe
def test_parallel_threads():
results = []
v0 = np.random.rand(50)
def worker():
x = diags_array([1, -2, 1], offsets=[-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=3, v0=v0)
results.append(w)
w, v = eigsh(x, k=3, v0=v0)
results.append(w)
threads = [threading.Thread(target=worker) for k in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
worker()
for r in results:
assert_allclose(r, results[-1])
def test_reentering():
# Just some linear operator that calls eigs recursively
def A_matvec(x):
x = diags_array([1, -2, 1], offsets=[-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=1)
return v / w[0]
A = LinearOperator(matvec=A_matvec, dtype=float, shape=(50, 50))
# The Fortran code is not reentrant, so this fails (gracefully, not crashing)
assert_raises(RuntimeError, eigs, A, k=1)
assert_raises(RuntimeError, eigsh, A, k=1)
def test_regression_arpackng_1315():
# Check that issue arpack-ng/#1315 is not present.
# Adapted from arpack-ng/TESTS/bug_1315_single.c
# If this fails, then the installed ARPACK library is faulty.
for dtype in [np.float32, np.float64]:
np.random.seed(1234)
w0 = np.arange(1, 1000+1).astype(dtype)
A = diags_array([w0], offsets=[0], shape=(1000, 1000))
v0 = np.random.rand(1000).astype(dtype)
w, v = eigs(A, k=9, ncv=2*9+1, which="LM", v0=v0)
assert_allclose(np.sort(w), np.sort(w0[-9:]),
rtol=1e-4)
def test_eigs_for_k_greater():
# Test eigs() for k beyond limits.
rng = np.random.RandomState(1234)
A_sparse = diags_array([1, -2, 1], offsets=[-1, 0, 1], shape=(4, 4)) # sparse
A = generate_matrix(4, sparse=False, rng=rng)
M_dense = rng.random((4, 4))
M_sparse = generate_matrix(4, sparse=True, rng=rng)
M_linop = aslinearoperator(M_dense)
eig_tuple1 = eig(A, b=M_dense)
eig_tuple2 = eig(A, b=M_sparse)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
assert_equal(eigs(A, M=M_dense, k=3), eig_tuple1)
assert_equal(eigs(A, M=M_dense, k=4), eig_tuple1)
assert_equal(eigs(A, M=M_dense, k=5), eig_tuple1)
assert_equal(eigs(A, M=M_sparse, k=5), eig_tuple2)
# M as LinearOperator
assert_raises(TypeError, eigs, A, M=M_linop, k=3)
# Test 'A' for different types
assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
assert_raises(TypeError, eigs, A_sparse, k=3)
def test_eigsh_for_k_greater():
# Test eigsh() for k beyond limits.
rng = np.random.RandomState(1234)
A_sparse = diags_array([1, -2, 1], offsets=[-1, 0, 1], shape=(4, 4)) # sparse
A = generate_matrix(4, sparse=False, rng=rng)
M_dense = generate_matrix_symmetric(4, pos_definite=True, rng=rng)
M_sparse = generate_matrix_symmetric(
4, pos_definite=True, sparse=True, rng=rng)
M_linop = aslinearoperator(M_dense)
eig_tuple1 = eigh(A, b=M_dense)
eig_tuple2 = eigh(A, b=M_sparse)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
assert_equal(eigsh(A, M=M_dense, k=4), eig_tuple1)
assert_equal(eigsh(A, M=M_dense, k=5), eig_tuple1)
assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2)
# M as LinearOperator
assert_raises(TypeError, eigsh, A, M=M_linop, k=4)
# Test 'A' for different types
assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
def test_real_eigs_real_k_subset():
rng = np.random.default_rng(2)
n = 10
A = random_array(shape=(n, n), density=0.5, rng=rng)
A.data *= 2
A.data -= 1
A += A.T # make symmetric to test real eigenvalues
v0 = np.ones(n)
whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
dtypes = [np.float32, np.float64]
for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes):
prev_w = np.array([], dtype=dtype)
eps = np.finfo(dtype).eps
for k in range(1, 9):
w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma,
v0=v0.astype(dtype), tol=0)
assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps))
# Check that the set of eigenvalues for `k` is a subset of that for `k+1`
dist = abs(prev_w[:,None] - w).min(axis=1)
assert_allclose(dist, 0, atol=np.sqrt(eps))
prev_w = w


@ -0,0 +1,16 @@
"""
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
LOBPCG is a preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
Call the function lobpcg - see help for lobpcg.lobpcg.
"""
from .lobpcg import *
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester

File diff suppressed because it is too large.


@ -0,0 +1,725 @@
""" Test functions for the sparse.linalg._eigen.lobpcg module
"""
import itertools
import platform
import sys
import pytest
import numpy as np
from numpy import ones, r_, diag
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
from scipy import sparse
from scipy.linalg import (eigh, toeplitz,
cholesky_banded, cho_solve_banded)
from scipy.sparse import dia_array, eye_array, csr_array
from scipy.sparse.linalg import eigsh, LinearOperator
from scipy.sparse.linalg._eigen.lobpcg import lobpcg
from scipy.sparse.linalg._eigen.lobpcg.lobpcg import _b_orthonormalize
from scipy._lib._util import np_long, np_ulong
from scipy.sparse.linalg._special_sparse_arrays import (Sakurai,
MikotaPair)
_IS_32BIT = (sys.maxsize < 2**32)
INT_DTYPES = (np.intc, np_long, np.longlong, np.uintc, np_ulong, np.ulonglong)
# np.half is unsupported on many test systems so excluded
REAL_DTYPES = (np.float32, np.float64, np.longdouble)
COMPLEX_DTYPES = (np.complex64, np.complex128, np.clongdouble)
INEXACTDTYPES = REAL_DTYPES + COMPLEX_DTYPES
ALLDTYPES = INT_DTYPES + INEXACTDTYPES
def sign_align(A, B):
"""Align signs of columns of A match those of B: column-wise remove
sign of A by multiplying with its sign then multiply in sign of B.
"""
return np.array([col_A * np.sign(col_A[0]) * np.sign(col_B[0])
for col_A, col_B in zip(A.T, B.T)]).T
def ElasticRod(n):
"""Build the matrices for the generalized eigenvalue problem of the
fixed-free elastic rod vibration model.
"""
L = 1.0
le = L/n
rho = 7.85e3
S = 1.e-4
E = 2.1e11
mass = rho*S*le/6.
k = E*S/le
A = k*(diag(r_[2.*ones(n-1), 1])-diag(ones(n-1), 1)-diag(ones(n-1), -1))
B = mass*(diag(r_[4.*ones(n-1), 2])+diag(ones(n-1), 1)+diag(ones(n-1), -1))
return A, B
@pytest.mark.filterwarnings("ignore:The problem size")
@pytest.mark.parametrize("n", [10, 20])
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_ElasticRod(n):
"""Check eigh vs. lobpcg consistency for elastic rod model.
"""
A, B = ElasticRod(n)
m = 2
rnd = np.random.RandomState(0)
X = rnd.standard_normal((n, m))
eigvals, _ = lobpcg(A, X, B=B, tol=1e-2, maxiter=50, largest=False)
eigvals.sort()
w, _ = eigh(A, b=B)
w.sort()
assert_almost_equal(w[:int(m/2)], eigvals[:int(m/2)], decimal=2)
@pytest.mark.parametrize("n", [50])
@pytest.mark.parametrize("m", [1, 2, 10])
@pytest.mark.filterwarnings("ignore:Casting complex values to real")
@pytest.mark.parametrize("Vdtype", INEXACTDTYPES)
@pytest.mark.parametrize("Bdtype", ALLDTYPES)
@pytest.mark.parametrize("BVdtype", INEXACTDTYPES)
def test_b_orthonormalize(n, m, Vdtype, Bdtype, BVdtype):
"""Test B-orthonormalization by Cholesky with callable 'B'.
The function '_b_orthonormalize' is key in LOBPCG but may
lead to numerical instabilities. The input vectors are often
badly scaled, so the function needs scale-invariant Cholesky;
see https://netlib.org/lapack/lawnspdf/lawn14.pdf.
"""
rnd = np.random.RandomState(0)
X = rnd.standard_normal((n, m)).astype(Vdtype)
Xcopy = np.copy(X)
vals = np.arange(1, n+1, dtype=float)
B = dia_array(([vals], [0]), shape=(n, n)).astype(Bdtype)
BX = B @ X
BX = BX.astype(BVdtype)
is_all_complex = (np.issubdtype(Vdtype, np.complexfloating) and
np.issubdtype(BVdtype, np.complexfloating))
is_all_notcomplex = (not np.issubdtype(Vdtype, np.complexfloating) and
not np.issubdtype(Bdtype, np.complexfloating) and
not np.issubdtype(BVdtype, np.complexfloating))
# All complex or all not complex can calculate in-place
check_inplace = is_all_complex or is_all_notcomplex
# np.longdouble tol cannot be achieved on most systems
atol = m * n * max(np.finfo(Vdtype).eps,
np.finfo(BVdtype).eps,
np.finfo(np.float64).eps)
Xo, BXo, _ = _b_orthonormalize(lambda v: B @ v, X, BX)
if check_inplace:
# Check in-place
assert_equal(X, Xo)
assert_equal(id(X), id(Xo))
assert_equal(BX, BXo)
assert_equal(id(BX), id(BXo))
# Check BXo
assert_allclose(B @ Xo, BXo, atol=atol, rtol=atol)
# Check B-orthonormality
assert_allclose(Xo.T.conj() @ B @ Xo, np.identity(m),
atol=atol, rtol=atol)
# Repeat without BX in outputs
X = np.copy(Xcopy)
Xo1, BXo1, _ = _b_orthonormalize(lambda v: B @ v, X)
assert_allclose(Xo, Xo1, atol=atol, rtol=atol)
assert_allclose(BXo, BXo1, atol=atol, rtol=atol)
if check_inplace:
# Check in-place.
assert_equal(X, Xo1)
assert_equal(id(X), id(Xo1))
# Check BXo1
assert_allclose(B @ Xo1, BXo1, atol=atol, rtol=atol)
# Introduce column-scaling in X
scaling = 1.0 / np.geomspace(10, 1e10, num=m)
X = Xcopy * scaling
X = X.astype(Vdtype)
BX = B @ X
BX = BX.astype(BVdtype)
# Check scaling-invariance of Cholesky-based orthonormalization
Xo1, BXo1, _ = _b_orthonormalize(lambda v: B @ v, X, BX)
# The output should be the same, up to the signs of the columns
Xo1 = sign_align(Xo1, Xo)
assert_allclose(Xo, Xo1, atol=atol, rtol=atol)
BXo1 = sign_align(BXo1, BXo)
assert_allclose(BXo, BXo1, atol=atol, rtol=atol)
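# A minimal sketch (illustration only; the helper name is hypothetical) of
# the scale-invariant Cholesky-based B-orthonormalization that
# `_b_orthonormalize` performs. The real routine additionally updates its
# inputs in place and returns the Cholesky factor.
def _b_orthonormalize_sketch(B, X):
    """Return Xo with Xo^H @ B @ Xo = I for Hermitian positive definite B."""
    BX = B @ X
    # Normalize the columns first so the Cholesky factorization is
    # insensitive to badly scaled inputs; cf. LAWN 14 referenced above.
    d = 1.0 / np.sqrt(np.abs(np.einsum('ij,ij->j', X.conj(), BX)))
    Xd = X * d
    gram = Xd.conj().T @ (BX * d)            # normalized Gram matrix
    R = np.linalg.cholesky(gram).conj().T    # gram == R^H @ R, R upper triangular
    return np.linalg.solve(R.T, Xd.T).T      # Xo = Xd @ inv(R)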
@pytest.mark.thread_unsafe
@pytest.mark.filterwarnings("ignore:Exited at iteration 0")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_nonhermitian_warning(capsys):
"""Check the warning of a Ritz matrix being not Hermitian
by feeding a non-Hermitian input matrix.
Also check stdout since verbosityLevel=1 and lack of stderr.
"""
n = 10
X = np.arange(n * 2).reshape(n, 2).astype(np.float32)
A = np.arange(n * n).reshape(n, n).astype(np.float32)
with pytest.warns(UserWarning, match="Matrix gramA"):
_, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
out, err = capsys.readouterr() # Capture output
assert out.startswith("Solving standard eigenvalue") # Test stdout
assert err == '' # Test empty stderr
# Make the matrix symmetric so the UserWarning disappears.
A += A.T
_, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
out, err = capsys.readouterr() # Capture output
assert out.startswith("Solving standard eigenvalue") # Test stdout
assert err == '' # Test empty stderr
def test_regression():
"""Check the eigenvalue of the identity matrix is one.
"""
# https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
n = 10
X = np.ones((n, 1))
A = np.identity(n)
w, _ = lobpcg(A, X)
assert_allclose(w, [1])
@pytest.mark.filterwarnings("ignore:The problem size")
@pytest.mark.parametrize('n, m, m_excluded', [(30, 4, 3), (4, 2, 0)])
def test_diagonal(n, m, m_excluded):
"""Test ``m - m_excluded`` eigenvalues and eigenvectors of
diagonal matrices of the size ``n`` varying matrix formats:
dense array, spare matrix, and ``LinearOperator`` for both
matrixes in the generalized eigenvalue problem ``Av = cBv``
and for the preconditioner.
"""
rnd = np.random.RandomState(0)
# Define the generalized eigenvalue problem Av = cBv
# where (c, v) is a generalized eigenpair,
# A is the diagonal matrix whose entries are 1,...n,
# B is the identity matrix.
vals = np.arange(1, n+1, dtype=float)
A_s = dia_array(([vals], [0]), shape=(n, n))
A_a = A_s.toarray()
def A_f(x):
return A_s @ x
A_lo = LinearOperator(matvec=A_f,
matmat=A_f,
shape=(n, n), dtype=float)
B_a = eye_array(n)
B_s = csr_array(B_a)
def B_f(x):
return B_a @ x
B_lo = LinearOperator(matvec=B_f,
matmat=B_f,
shape=(n, n), dtype=float)
# Let the preconditioner M be the inverse of A.
M_s = dia_array(([1./vals], [0]), shape=(n, n))
M_a = M_s.toarray()
def M_f(x):
return M_s @ x
M_lo = LinearOperator(matvec=M_f,
matmat=M_f,
shape=(n, n), dtype=float)
# Pick random initial vectors.
X = rnd.normal(size=(n, m))
# Require that the returned eigenvectors be in the orthogonal complement
# of the first few standard basis vectors.
if m_excluded > 0:
Y = np.eye(n, m_excluded)
else:
Y = None
for A in [A_a, A_s, A_lo]:
for B in [B_a, B_s, B_lo]:
for M in [M_a, M_s, M_lo]:
eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y,
maxiter=40, largest=False)
assert_allclose(eigvals, np.arange(1+m_excluded,
1+m_excluded+m))
_check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3)
def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
"""Check if the eigenvalue residual is small.
"""
mult_wV = np.multiply(w, V)
dot_MV = M.dot(V)
assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)
def _check_fiedler(n, p):
"""Check the Fiedler vector computation.
"""
# This is not necessarily the recommended way to find the Fiedler vector.
col = np.zeros(n)
col[1] = 1
A = toeplitz(col)
D = np.diag(A.sum(axis=1))
L = D - A
# Compute the full eigendecomposition using tricks, e.g.
# http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
tmp = np.pi * np.arange(n) / n
analytic_w = 2 * (1 - np.cos(tmp))
analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
_check_eigen(L, analytic_w, analytic_V)
# Compute the full eigendecomposition using eigh.
eigh_w, eigh_V = eigh(L)
_check_eigen(L, eigh_w, eigh_V)
# Check that the first eigenvalue is near zero and that the rest agree.
assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
assert_allclose(eigh_w[1:], analytic_w[1:])
# Check small lobpcg eigenvalues.
X = analytic_V[:, :p]
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
assert_equal(lobpcg_w.shape, (p,))
assert_equal(lobpcg_V.shape, (n, p))
_check_eigen(L, lobpcg_w, lobpcg_V)
assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
# Check large lobpcg eigenvalues.
X = analytic_V[:, -p:]
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
assert_equal(lobpcg_w.shape, (p,))
assert_equal(lobpcg_V.shape, (n, p))
_check_eigen(L, lobpcg_w, lobpcg_V)
assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
# Look for the Fiedler vector using good but not exactly correct guesses.
fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
X = np.vstack((np.ones(n), fiedler_guess)).T
lobpcg_w, _ = lobpcg(L, X, largest=False)
# Mathematically, the smaller eigenvalue should be zero
# and the larger should be the algebraic connectivity.
lobpcg_w = np.sort(lobpcg_w)
assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
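# A minimal sketch (illustration only; the helper is hypothetical): the
# Fiedler vector can also be targeted directly by deflating the known
# constant nullvector of the Laplacian through the constraint argument
# `Y`, instead of supplying two initial guesses as `_check_fiedler` does.
def _fiedler_via_constraint_sketch(n=12):
    col = np.zeros(n)
    col[1] = 1
    A = toeplitz(col)
    L = np.diag(A.sum(axis=1)) - A   # path-graph Laplacian
    Y = np.ones((n, 1))              # known eigenvector for eigenvalue 0
    rng = np.random.default_rng(0)
    X = rng.standard_normal((n, 1))
    w, v = lobpcg(L, X, Y=Y, largest=False, tol=1e-10, maxiter=200)
    return w[0], v[:, 0]             # algebraic connectivity, Fiedler vector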
@pytest.mark.thread_unsafe
def test_fiedler_small_8():
"""Check the dense workaround path for small matrices.
"""
# This triggers the dense path because 8 < 2*5.
with pytest.warns(UserWarning, match="The problem size"):
_check_fiedler(8, 2)
def test_fiedler_large_12():
"""Check the dense workaround path avoided for non-small matrices.
"""
# This does not trigger the dense path, because 2*5 <= 12.
_check_fiedler(12, 2)
@pytest.mark.filterwarnings("ignore:Failed at iteration")
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_failure_to_run_iterations():
"""Check that the code exits gracefully without breaking. Issue #10974.
The code may or may not issue a warning, which is filtered out. Issues #15935, #17954.
"""
rnd = np.random.RandomState(0)
X = rnd.standard_normal((100, 10))
A = X @ X.T
Q = rnd.standard_normal((X.shape[0], 4))
eigenvalues, _ = lobpcg(A, Q, maxiter=40, tol=1e-12)
assert np.max(eigenvalues) > 0
@pytest.mark.thread_unsafe
def test_failure_to_run_iterations_nonsymmetric():
"""Check that the code exists gracefully without breaking
if the matrix in not symmetric.
"""
A = np.zeros((10, 10))
A[0, 1] = 1
Q = np.ones((10, 1))
msg = "Exited at iteration 2|Exited postprocessing with accuracies.*"
with pytest.warns(UserWarning, match=msg):
eigenvalues, _ = lobpcg(A, Q, maxiter=20)
assert np.max(eigenvalues) > 0
@pytest.mark.filterwarnings("ignore:The problem size")
def test_hermitian():
"""Check complex-value Hermitian cases.
"""
rnd = np.random.RandomState(0)
sizes = [3, 12]
ks = [1, 2]
gens = [True, False]
for s, k, gen, dh, dx, db in (
itertools.product(sizes, ks, gens, gens, gens, gens)
):
H = rnd.random((s, s)) + 1.j * rnd.random((s, s))
H = 10 * np.eye(s) + H + H.T.conj()
H = H.astype(np.complex128) if dh else H.astype(np.complex64)
X = rnd.standard_normal((s, k))
X = X + 1.j * rnd.standard_normal((s, k))
X = X.astype(np.complex128) if dx else X.astype(np.complex64)
if not gen:
B = np.eye(s)
w, v = lobpcg(H, X, maxiter=99, verbosityLevel=0)
# Also test mixing complex H with real B.
wb, _ = lobpcg(H, X, B, maxiter=99, verbosityLevel=0)
assert_allclose(w, wb, rtol=1e-6)
w0, _ = eigh(H)
else:
B = rnd.random((s, s)) + 1.j * rnd.random((s, s))
B = 10 * np.eye(s) + B.dot(B.T.conj())
B = B.astype(np.complex128) if db else B.astype(np.complex64)
w, v = lobpcg(H, X, B, maxiter=99, verbosityLevel=0)
w0, _ = eigh(H, B)
for wx, vx in zip(w, v.T):
# Check eigenvector
assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx)
/ np.linalg.norm(H.dot(vx)),
0, atol=5e-2, rtol=0)
# Compare eigenvalues
j = np.argmin(abs(w0 - wx))
assert_allclose(wx, w0[j], rtol=1e-4)
# The n=5 case tests the alternative small matrix code path that uses eigh().
@pytest.mark.filterwarnings("ignore:The problem size")
@pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)])
def test_eigsh_consistency(n, atol):
"""Check eigsh vs. lobpcg consistency.
"""
vals = np.arange(1, n+1, dtype=np.float64)
A = dia_array((vals, 0), shape=(n, n))
rnd = np.random.RandomState(0)
X = rnd.standard_normal((n, 2))
lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100)
vals, _ = eigsh(A, k=2)
_check_eigen(A, lvals, lvecs, atol=atol, rtol=0)
assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14)
@pytest.mark.thread_unsafe
def test_verbosity():
"""Check that nonzero verbosity level code runs.
"""
rnd = np.random.RandomState(0)
X = rnd.standard_normal((10, 10))
A = X @ X.T
Q = rnd.standard_normal((X.shape[0], 1))
msg = "Exited at iteration.*|Exited postprocessing with accuracies.*"
with pytest.warns(UserWarning, match=msg):
_, _ = lobpcg(A, Q, maxiter=3, verbosityLevel=9)
@pytest.mark.xfail(_IS_32BIT and sys.platform == 'win32',
reason="tolerance violation on windows")
@pytest.mark.xfail(platform.machine() == 'ppc64le',
reason="fails on ppc64le")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_tolerance_float32():
"""Check lobpcg for attainable tolerance in float32.
"""
rnd = np.random.RandomState(0)
n = 50
m = 3
vals = -np.arange(1, n + 1)
A = dia_array(([vals], [0]), shape=(n, n))
A = A.astype(np.float32)
X = rnd.standard_normal((n, m))
X = X.astype(np.float32)
eigvals, _ = lobpcg(A, X, tol=1.25e-5, maxiter=50, verbosityLevel=0)
assert_allclose(eigvals, -np.arange(1, 1 + m), atol=2e-5, rtol=1e-5)
@pytest.mark.parametrize("vdtype", INEXACTDTYPES)
@pytest.mark.parametrize("mdtype", ALLDTYPES)
@pytest.mark.parametrize("arr_type", [np.array,
sparse.csr_array,
sparse.coo_array])
def test_dtypes(vdtype, mdtype, arr_type):
"""Test lobpcg in various dtypes.
"""
rnd = np.random.RandomState(0)
n = 12
m = 2
A = arr_type(np.diag(np.arange(1, n + 1)).astype(mdtype))
X = rnd.random((n, m))
X = X.astype(vdtype)
eigvals, eigvecs = lobpcg(A, X, tol=1e-2, largest=False)
assert_allclose(eigvals, np.arange(1, 1 + m), atol=1e-1)
# eigenvectors must be nearly real in any case
assert_allclose(np.sum(np.abs(eigvecs - eigvecs.conj())), 0, atol=1e-2)
@pytest.mark.thread_unsafe
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_inplace_warning():
"""Check lobpcg gives a warning in '_b_orthonormalize'
that in-place orthogonalization is impossible due to dtype mismatch.
"""
rnd = np.random.RandomState(0)
n = 6
m = 1
vals = -np.arange(1, n + 1)
A = dia_array(([vals], [0]), shape=(n, n))
A = A.astype(np.cdouble)
X = rnd.standard_normal((n, m))
with pytest.warns(UserWarning, match="Inplace update"):
eigvals, _ = lobpcg(A, X, maxiter=2, verbosityLevel=1)
@pytest.mark.thread_unsafe
def test_maxit():
"""Check lobpcg if maxit=maxiter runs maxiter iterations and
if maxit=None runs 20 iterations (the default)
by checking the size of the iteration history output, which should
be the number of iterations plus 3 (initial, final, and postprocessing)
typically when maxiter is small and the choice of the best is passive.
"""
rnd = np.random.RandomState(0)
n = 50
m = 4
vals = -np.arange(1, n + 1)
A = dia_array(([vals], [0]), shape=(n, n))
A = A.astype(np.float32)
X = rnd.standard_normal((n, m))
X = X.astype(np.float64)
msg = "Exited at iteration.*|Exited postprocessing with accuracies.*"
for maxiter in range(1, 4):
with pytest.warns(UserWarning, match=msg):
_, _, l_h, r_h = lobpcg(A, X, tol=1e-8, maxiter=maxiter,
retLambdaHistory=True,
retResidualNormsHistory=True)
assert_allclose(np.shape(l_h)[0], maxiter+3)
assert_allclose(np.shape(r_h)[0], maxiter+3)
with pytest.warns(UserWarning, match=msg):
l, _, l_h, r_h = lobpcg(A, X, tol=1e-8,
retLambdaHistory=True,
retResidualNormsHistory=True)
assert_allclose(np.shape(l_h)[0], 20+3)
assert_allclose(np.shape(r_h)[0], 20+3)
# Check that eigenvalue output is the last one in history
assert_allclose(l, l_h[-1])
# Make sure that both history outputs are lists
assert isinstance(l_h, list)
assert isinstance(r_h, list)
# Make sure that both history lists are arrays-like
assert_allclose(np.shape(l_h), np.shape(np.asarray(l_h)))
assert_allclose(np.shape(r_h), np.shape(np.asarray(r_h)))
@pytest.mark.xslow
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_sakurai():
"""Check lobpcg and eighs accuracy for the Sakurai example
already used in `benchmarks/benchmarks/sparse_linalg_lobpcg.py`.
"""
n = 50
tol = 100 * n * n * n * np.finfo(float).eps
sakurai_obj = Sakurai(n, dtype='int')
A = sakurai_obj
m = 3
ee = sakurai_obj.eigenvalues(3)
rng = np.random.default_rng(0)
X = rng.normal(size=(n, m))
el, _ = lobpcg(A, X, tol=1e-9, maxiter=5000, largest=False)
accuracy = max(abs(ee - el) / ee)
assert_allclose(accuracy, 0., atol=tol)
a_l = LinearOperator((n, n), matvec=A, matmat=A, dtype='float64')
ea, _ = eigsh(a_l, k=m, which='SA', tol=1e-9, maxiter=15000,
v0=rng.normal(size=(n, 1)))
accuracy = max(abs(ee - ea) / ee)
assert_allclose(accuracy, 0., atol=tol)
@pytest.mark.parametrize("n", [500, 1000])
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_sakurai_inverse(n):
"""Check lobpcg and eighs accuracy for the sakurai_inverse example
already used in `benchmarks/benchmarks/sparse_linalg_lobpcg.py`.
"""
def a(x):
return cho_solve_banded((c, False), x)
tol = 100 * n * n * n * np.finfo(float).eps
sakurai_obj = Sakurai(n)
A = sakurai_obj.tobanded().astype(np.float64)
m = 3
ee = sakurai_obj.eigenvalues(3)
rng = np.random.default_rng(0)
X = rng.normal(size=(n, m))
c = cholesky_banded(A)
el, _ = lobpcg(a, X, tol=1e-9, maxiter=8)
accuracy = max(abs(ee - 1. / el) / ee)
assert_allclose(accuracy, 0., atol=tol)
a_l = LinearOperator((n, n), matvec=a, matmat=a, dtype='float64')
ea, _ = eigsh(a_l, k=m, which='LA', tol=1e-9, maxiter=8,
v0=rng.normal(size=(n, 1)))
accuracy = max(abs(ee - np.sort(1. / ea)) / ee)
assert_allclose(accuracy, 0., atol=tol)
@pytest.mark.filterwarnings("ignore:The problem size")
@pytest.mark.parametrize("n", [10, 20, 128, 256, 512, 1024, 2048])
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_MikotaPair(n):
"""Check lobpcg and eighs accuracy for the Mikota example
already used in `benchmarks/benchmarks/sparse_linalg_lobpcg.py`.
"""
def a(x):
return cho_solve_banded((c, False), x)
mik = MikotaPair(n)
mik_k = mik.k
mik_m = mik.m
Ac = mik_k
Bc = mik_m
Ab = mik_k.tobanded()
eigenvalues = mik.eigenvalues
if n == 10:
m = 3 # lobpcg calls eigh
elif n == 20:
m = 2
else:
m = 10
ee = eigenvalues(m)
tol = 100 * m * n * n * np.finfo(float).eps
rng = np.random.default_rng(0)
X = rng.normal(size=(n, m))
c = cholesky_banded(Ab.astype(np.float32))
el, _ = lobpcg(Ac, X, Bc, M=a, tol=1e-4,
maxiter=40, largest=False)
accuracy = max(abs(ee - el) / ee)
assert_allclose(accuracy, 0., atol=tol)
B = LinearOperator((n, n), matvec=Bc, matmat=Bc, dtype='float64')
A = LinearOperator((n, n), matvec=Ac, matmat=Ac, dtype='float64')
c = cholesky_banded(Ab)
a_l = LinearOperator((n, n), matvec=a, matmat=a, dtype='float64')
ea, _ = eigsh(B, k=m, M=A, Minv=a_l, which='LA', tol=1e-4, maxiter=50,
v0=rng.normal(size=(n, 1)))
accuracy = max(abs(ee - np.sort(1./ea)) / ee)
assert_allclose(accuracy, 0., atol=tol)
@pytest.mark.slow
@pytest.mark.parametrize("n", [15])
@pytest.mark.parametrize("m", [1, 2])
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_diagonal_data_types(n, m):
"""Check lobpcg for diagonal matrices for all matrix types.
Constraints are imposed, so a dense eigensolver such as eigh cannot be used for reference.
"""
rnd = np.random.RandomState(0)
# Define the generalized eigenvalue problem Av = cBv
# where (c, v) is a generalized eigenpair,
# and where we choose A and B to be diagonal.
vals = np.arange(1, n + 1)
list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']
for s_f_i, s_f in enumerate(list_sparse_format):
As64 = dia_array(([vals * vals], [0]), shape=(n, n)).asformat(s_f)
As32 = As64.astype(np.float32)
Af64 = As64.toarray()
Af32 = Af64.astype(np.float32)
def As32f(x):
return As32 @ x
As32LO = LinearOperator(matvec=As32f,
matmat=As32f,
shape=(n, n),
dtype=As32.dtype)
listA = [Af64, As64, Af32, As32, As32f, As32LO, lambda v: As32 @ v]
Bs64 = dia_array(([vals], [0]), shape=(n, n)).asformat(s_f)
Bf64 = Bs64.toarray()
Bs32 = Bs64.astype(np.float32)
def Bs32f(x):
return Bs32 @ x
Bs32LO = LinearOperator(matvec=Bs32f,
matmat=Bs32f,
shape=(n, n),
dtype=Bs32.dtype)
listB = [Bf64, Bs64, Bs32, Bs32f, Bs32LO, lambda v: Bs32 @ v]
# Define the preconditioner function as LinearOperator.
Ms64 = dia_array(([1./vals], [0]), shape=(n, n)).asformat(s_f)
def Ms64precond(x):
return Ms64 @ x
Ms64precondLO = LinearOperator(matvec=Ms64precond,
matmat=Ms64precond,
shape=(n, n),
dtype=Ms64.dtype)
Mf64 = Ms64.toarray()
def Mf64precond(x):
return Mf64 @ x
Mf64precondLO = LinearOperator(matvec=Mf64precond,
matmat=Mf64precond,
shape=(n, n),
dtype=Mf64.dtype)
Ms32 = Ms64.astype(np.float32)
def Ms32precond(x):
return Ms32 @ x
Ms32precondLO = LinearOperator(matvec=Ms32precond,
matmat=Ms32precond,
shape=(n, n),
dtype=Ms32.dtype)
Mf32 = Ms32.toarray()
def Mf32precond(x):
return Mf32 @ x
Mf32precondLO = LinearOperator(matvec=Mf32precond,
matmat=Mf32precond,
shape=(n, n),
dtype=Mf32.dtype)
listM = [None, Ms64, Ms64precondLO, Mf64precondLO, Ms64precond,
Ms32, Ms32precondLO, Mf32precondLO, Ms32precond]
# Setup matrix of the initial approximation to the eigenvectors
# (cannot be sparse array).
Xf64 = rnd.random((n, m))
Xf32 = Xf64.astype(np.float32)
listX = [Xf64, Xf32]
# Require that the returned eigenvectors be in the orthogonal complement
# of the first few standard basis vectors (cannot be sparse array).
m_excluded = 3
Yf64 = np.eye(n, m_excluded, dtype=float)
Yf32 = np.eye(n, m_excluded, dtype=np.float32)
listY = [Yf64, Yf32]
tests = list(itertools.product(listA, listB, listM, listX, listY))
for A, B, M, X, Y in tests:
# This is one of the slower tests because there are >1,000 configs
# to test here. Flip a biased coin to decide whether to run each
# test to get decent coverage in less time.
if rnd.random() < 0.98:
continue # too many tests
eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4,
maxiter=100, largest=False)
assert_allclose(eigvals,
np.arange(1 + m_excluded, 1 + m_excluded + m),
atol=1e-5)

View file

@ -0,0 +1,886 @@
import re
import copy
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_array_equal
import pytest
from scipy.linalg import svd, null_space
from scipy.sparse import csc_array, issparse, dia_array, random_array
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg import svds
from scipy.sparse.linalg._eigen.arpack import ArpackNoConvergence
# --- Helper Functions / Classes ---
def sorted_svd(m, k, which='LM'):
# Compute svd of a dense matrix m, and return singular vectors/values
# sorted.
if issparse(m):
m = m.toarray()
u, s, vh = svd(m)
if which == 'LM':
ii = np.argsort(s)[-k:]
elif which == 'SM':
ii = np.argsort(s)[:k]
else:
raise ValueError(f"unknown which={which!r}")
return u[:, ii], s[ii], vh[ii]
def _check_svds(A, k, u, s, vh, which="LM", check_usvh_A=False,
check_svd=True, atol=1e-10, rtol=1e-7):
n, m = A.shape
# Check shapes.
assert_equal(u.shape, (n, k))
assert_equal(s.shape, (k,))
assert_equal(vh.shape, (k, m))
# Check that the original matrix can be reconstituted.
A_rebuilt = (u*s).dot(vh)
assert_equal(A_rebuilt.shape, A.shape)
if check_usvh_A:
assert_allclose(A_rebuilt, A, atol=atol, rtol=rtol)
# Check that u is a semi-orthogonal matrix.
uh_u = np.dot(u.T.conj(), u)
assert_equal(uh_u.shape, (k, k))
assert_allclose(uh_u, np.identity(k), atol=atol, rtol=rtol)
# Check that vh is a semi-orthogonal matrix.
vh_v = np.dot(vh, vh.T.conj())
assert_equal(vh_v.shape, (k, k))
assert_allclose(vh_v, np.identity(k), atol=atol, rtol=rtol)
# Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
if check_svd:
u2, s2, vh2 = sorted_svd(A, k, which)
assert_allclose(np.abs(u), np.abs(u2), atol=atol, rtol=rtol)
assert_allclose(s, s2, atol=atol, rtol=rtol)
assert_allclose(np.abs(vh), np.abs(vh2), atol=atol, rtol=rtol)
def _check_svds_n(A, k, u, s, vh, which="LM", check_res=True,
check_svd=True, atol=1e-10, rtol=1e-7):
n, m = A.shape
# Check shapes.
assert_equal(u.shape, (n, k))
assert_equal(s.shape, (k,))
assert_equal(vh.shape, (k, m))
# Check that u is a semi-orthogonal matrix.
uh_u = np.dot(u.T.conj(), u)
assert_equal(uh_u.shape, (k, k))
error = np.sum(np.abs(uh_u - np.identity(k))) / (k * k)
assert_allclose(error, 0.0, atol=atol, rtol=rtol)
# Check that vh is a semi-orthogonal matrix.
vh_v = np.dot(vh, vh.T.conj())
assert_equal(vh_v.shape, (k, k))
error = np.sum(np.abs(vh_v - np.identity(k))) / (k * k)
assert_allclose(error, 0.0, atol=atol, rtol=rtol)
# Check residuals
if check_res:
ru = A.T.conj() @ u - vh.T.conj() * s
rus = np.sum(np.abs(ru)) / (n * k)
rvh = A @ vh.T.conj() - u * s
rvhs = np.sum(np.abs(rvh)) / (m * k)
assert_allclose(rus, 0.0, atol=atol, rtol=rtol)
assert_allclose(rvhs, 0.0, atol=atol, rtol=rtol)
# Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
if check_svd:
u2, s2, vh2 = sorted_svd(A, k, which)
assert_allclose(s, s2, atol=atol, rtol=rtol)
A_rebuilt_svd = (u2*s2).dot(vh2)
A_rebuilt = (u*s).dot(vh)
assert_equal(A_rebuilt.shape, A.shape)
error = np.sum(np.abs(A_rebuilt_svd - A_rebuilt)) / (k * k)
assert_allclose(error, 0.0, atol=atol, rtol=rtol)
class CheckingLinearOperator(LinearOperator):
def __init__(self, A):
self.A = A
self.dtype = A.dtype
self.shape = A.shape
def _matvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.dot(x)
def _rmatvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.T.conjugate().dot(x)
# --- Test Input Validation ---
# Tests input validation on parameters `k` and `which`.
# Needs better input validation checks for all other parameters.
class SVDSCommonTests:
solver = None
# some of these IV tests could run only once, say with solver=None
_A_empty_msg = "`A` must not be empty."
_A_dtype_msg = "`A` must be of numeric data type"
_A_type_msg = "type not understood"
_A_ndim_msg = "array must have ndim <= 2"
_A_validation_inputs = [
(np.asarray([[]]), ValueError, _A_empty_msg),
(np.array([['a', 'b'], ['c', 'd']], dtype='object'), ValueError, _A_dtype_msg),
("hi", TypeError, _A_type_msg),
(np.asarray([[[1., 2.], [3., 4.]]]), ValueError, _A_ndim_msg)]
@pytest.mark.parametrize("args", _A_validation_inputs)
def test_svds_input_validation_A(self, args):
A, error_type, message = args
with pytest.raises(error_type, match=message):
svds(A, k=1, solver=self.solver, rng=0)
@pytest.mark.parametrize("which", ["LM", "SM"])
def test_svds_int_A(self, which):
A = np.asarray([[1, 2], [3, 4]])
if self.solver == 'lobpcg':
with pytest.warns(UserWarning, match="The problem size"):
res = svds(A, k=1, which=which, solver=self.solver, rng=0)
else:
res = svds(A, k=1, which=which, solver=self.solver, rng=0)
_check_svds(A, 1, *res, which=which, atol=8e-10)
def test_svds_diff0_docstring_example(self):
def diff0(a):
return np.diff(a, axis=0)
def diff0t(a):
if a.ndim == 1:
a = a[:, np.newaxis]  # Turn 1D into 2D array
d = np.zeros((a.shape[0] + 1, a.shape[1]), dtype=a.dtype)
d[0, :] = - a[0, :]
d[1:-1, :] = a[0:-1, :] - a[1:, :]
d[-1, :] = a[-1, :]
return d
def diff0_func_aslo_def(n):
return LinearOperator(matvec=diff0,
matmat=diff0,
rmatvec=diff0t,
rmatmat=diff0t,
shape=(n - 1, n))
n = 100
diff0_func_aslo = diff0_func_aslo_def(n)
# preserve a use of legacy keyword `random_state` during SPEC 7 transition
u, s, _ = svds(diff0_func_aslo, k=3, which='SM', random_state=0)
se = 2. * np.sin(np.pi * np.arange(1, 4) / (2. * n))
ue = np.sqrt(2 / n) * np.sin(np.pi * np.outer(np.arange(1, n),
np.arange(1, 4)) / n)
assert_allclose(s, se, atol=1e-3)
assert_allclose(np.abs(u), np.abs(ue), atol=1e-6)
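# A quick sanity check (illustration only; the helper is hypothetical)
# that `diff0t` above really implements the adjoint of `diff0`, via the
# defining identity <diff0(x), y> == <x, diff0t(y)>.
def _diff0_adjoint_check_sketch(n=100, seed=0):
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(n)        # domain of diff0: length n
    y = rng.standard_normal(n - 1)    # range of diff0: length n - 1
    lhs = np.dot(np.diff(x), y)       # <diff0(x), y>
    d = np.zeros(n)                   # apply the adjoint to y, kept 1D
    d[0] = -y[0]
    d[1:-1] = y[:-1] - y[1:]
    d[-1] = y[-1]
    rhs = np.dot(x, d)                # <x, diff0t(y)>
    assert abs(lhs - rhs) < 1e-12 * max(1.0, abs(lhs))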
@pytest.mark.parametrize("k", [-1, 0, 3, 4, 5, 1.5, "1"])
def test_svds_input_validation_k_1(self, k):
rng = np.random.default_rng(0)
A = rng.random((4, 3))
# propack can do complete SVD
if self.solver == 'propack' and k == 3:
res = svds(A, k=k, solver=self.solver, rng=0)
_check_svds(A, k, *res, check_usvh_A=True, check_svd=True)
return
message = ("`k` must be an integer satisfying")
with pytest.raises(ValueError, match=message):
svds(A, k=k, solver=self.solver, rng=0)
def test_svds_input_validation_k_2(self):
# I think the stack trace is reasonable when `k` can't be converted
# to an int.
message = "int() argument must be a"
with pytest.raises(TypeError, match=re.escape(message)):
svds(np.eye(10), k=[], solver=self.solver, rng=0)
message = "invalid literal for int()"
with pytest.raises(ValueError, match=message):
svds(np.eye(10), k="hi", solver=self.solver, rng=0)
@pytest.mark.parametrize("tol", (-1, np.inf, np.nan))
def test_svds_input_validation_tol_1(self, tol):
message = "`tol` must be a non-negative floating point value."
with pytest.raises(ValueError, match=message):
svds(np.eye(10), tol=tol, solver=self.solver, rng=0)
@pytest.mark.parametrize("tol", ([], 'hi'))
def test_svds_input_validation_tol_2(self, tol):
# I think the stack trace is reasonable here
message = "'<' not supported between instances"
with pytest.raises(TypeError, match=message):
svds(np.eye(10), tol=tol, solver=self.solver, rng=0)
@pytest.mark.parametrize("which", ('LA', 'SA', 'ekki', 0))
def test_svds_input_validation_which(self, which):
# Regression test for a github issue.
# https://github.com/scipy/scipy/issues/4590
# Function was not checking for eigenvalue type and unintended
# values could be returned.
with pytest.raises(ValueError, match="`which` must be in"):
svds(np.eye(10), which=which, solver=self.solver, rng=0)
@pytest.mark.parametrize("transpose", (True, False))
@pytest.mark.parametrize("n", range(4, 9))
def test_svds_input_validation_v0_1(self, transpose, n):
rng = np.random.default_rng(0)
A = rng.random((5, 7))
v0 = rng.random(n)
if transpose:
A = A.T
k = 2
message = "`v0` must have shape"
required_length = (A.shape[0] if self.solver == 'propack'
else min(A.shape))
if n != required_length:
with pytest.raises(ValueError, match=message):
svds(A, k=k, v0=v0, solver=self.solver, rng=0)
def test_svds_input_validation_v0_2(self):
A = np.ones((10, 10))
v0 = np.ones((1, 10))
message = "`v0` must have shape"
with pytest.raises(ValueError, match=message):
svds(A, k=1, v0=v0, solver=self.solver, rng=0)
@pytest.mark.parametrize("v0", ("hi", 1, np.ones(10, dtype=int)))
def test_svds_input_validation_v0_3(self, v0):
A = np.ones((10, 10))
message = "`v0` must be of floating or complex floating data type."
with pytest.raises(ValueError, match=message):
svds(A, k=1, v0=v0, solver=self.solver, rng=0)
@pytest.mark.parametrize("maxiter", (-1, 0, 5.5))
def test_svds_input_validation_maxiter_1(self, maxiter):
message = ("`maxiter` must be a positive integer.")
with pytest.raises(ValueError, match=message):
svds(np.eye(10), maxiter=maxiter, solver=self.solver, rng=0)
def test_svds_input_validation_maxiter_2(self):
# I think the stack trace is reasonable when `k` can't be converted
# to an int.
message = "int() argument must be a"
with pytest.raises(TypeError, match=re.escape(message)):
svds(np.eye(10), maxiter=[], solver=self.solver, rng=0)
message = "invalid literal for int()"
with pytest.raises(ValueError, match=message):
svds(np.eye(10), maxiter="hi", solver=self.solver, rng=0)
@pytest.mark.parametrize("rsv", ('ekki', 10))
def test_svds_input_validation_return_singular_vectors(self, rsv):
message = "`return_singular_vectors` must be in"
with pytest.raises(ValueError, match=message):
svds(np.eye(10), return_singular_vectors=rsv, solver=self.solver, rng=0)
# --- Test Parameters ---
@pytest.mark.thread_unsafe
@pytest.mark.parametrize("k", [3, 5])
@pytest.mark.parametrize("which", ["LM", "SM"])
def test_svds_parameter_k_which(self, k, which):
# check that the `k` parameter sets the number of singular values/
# vectors returned.
# Also check that the `which` parameter sets whether the largest or
# smallest singular values are returned
rng = np.random.default_rng(0)
A = rng.random((10, 10))
if self.solver == 'lobpcg':
with pytest.warns(UserWarning, match="The problem size"):
res = svds(A, k=k, which=which, solver=self.solver, rng=0)
else:
res = svds(A, k=k, which=which, solver=self.solver, rng=0)
_check_svds(A, k, *res, which=which, atol=1e-9, rtol=2e-13)
@pytest.mark.filterwarnings("ignore:Exited",
reason="Ignore LOBPCG early exit.")
# loop instead of parametrize for simplicity
def test_svds_parameter_tol(self):
# check the effect of the `tol` parameter on solver accuracy by solving
# the same problem with varying `tol` and comparing the singular values
# against ground truth computed with a dense `svd`
n = 100 # matrix size
k = 3 # number of eigenvalues to check
# generate a random, sparse-ish matrix
# effect isn't apparent for matrices that are too small
rng = np.random.default_rng(0)
A = rng.random((n, n))
A[A > .1] = 0
A = A @ A.T
_, s, _ = svd(A) # calculate ground truth
# calculate the error as a function of `tol`
A = csc_array(A)
def err(tol):
_, s2, _ = svds(A, k=k, v0=np.ones(n), maxiter=1000,
solver=self.solver, tol=tol, rng=0)
return np.linalg.norm((s2 - s[k-1::-1])/s[k-1::-1])
tols = [1e-4, 1e-2, 1e0] # tolerance levels to check
# for 'arpack' and 'propack', accuracies make discrete steps
accuracies = {'propack': [1e-12, 1e-6, 1e-4],
'arpack': [2.5e-15, 1e-10, 1e-10],
'lobpcg': [2e-12, 4e-2, 2]}
for tol, accuracy in zip(tols, accuracies[self.solver]):
error = err(tol)
assert error < accuracy
def test_svd_v0(self):
# check that the `v0` parameter affects the solution
n = 100
k = 1
# If k != 1, LOBPCG needs more initial vectors, which are generated
# with rng, so it does not pass w/ k >= 2.
# For some other values of `n`, the AssertionErrors are not raised
# with different v0s, which is reasonable.
rng = np.random.default_rng(0)
A = rng.random((n, n))
# with the same v0, solutions are the same, and they are accurate
# v0 takes precedence over rng
v0a = rng.random(n)
res1a = svds(A, k, v0=v0a, solver=self.solver, rng=0)
res2a = svds(A, k, v0=v0a, solver=self.solver, rng=1)
for idx in range(3):
assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
_check_svds(A, k, *res1a)
# with the same v0, solutions are the same, and they are accurate
v0b = rng.random(n)
res1b = svds(A, k, v0=v0b, solver=self.solver, rng=2)
res2b = svds(A, k, v0=v0b, solver=self.solver, rng=3)
for idx in range(3):
assert_allclose(res1b[idx], res2b[idx], rtol=1e-15, atol=2e-16)
_check_svds(A, k, *res1b)
# with different v0, solutions can be numerically different
message = "Arrays are not equal"
with pytest.raises(AssertionError, match=message):
assert_equal(res1a, res1b)
def test_svd_rng(self):
# check that the `rng` parameter affects the solution
# Admittedly, `n` and `k` are chosen so that all solver pass all
# these checks. That's a tall order, since LOBPCG doesn't want to
# achieve the desired accuracy and ARPACK often returns the same
# singular values/vectors for different v0.
n = 100
k = 1
rng = np.random.default_rng(0)
A = rng.random((n, n))
# with the same rng, solutions are the same and accurate
res1a = svds(A, k, solver=self.solver, rng=0)
res2a = svds(A, k, solver=self.solver, rng=0)
for idx in range(3):
assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
_check_svds(A, k, *res1a)
# with the same rng, solutions are the same and accurate
res1b = svds(A, k, solver=self.solver, rng=1)
res2b = svds(A, k, solver=self.solver, rng=1)
for idx in range(3):
assert_allclose(res1b[idx], res2b[idx], rtol=1e-15, atol=2e-16)
_check_svds(A, k, *res1b)
# with different rng, solutions can be numerically different
message = "Arrays are not equal"
with pytest.raises(AssertionError, match=message):
assert_equal(res1a, res1b)
def test_svd_rng_2(self):
n = 100
k = 1
rng = np.random.default_rng(234981)
A = rng.random((n, n))
rng_2 = copy.deepcopy(rng)
# with the same rng, solutions are the same and accurate
res1a = svds(A, k, solver=self.solver, rng=rng)
res2a = svds(A, k, solver=self.solver, rng=rng_2)
for idx in range(3):
assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
_check_svds(A, k, *res1a)
@pytest.mark.filterwarnings("ignore:Exited",
reason="Ignore LOBPCG early exit.")
def test_svd_rng_3(self):
n = 100
k = 5
rng1 = np.random.default_rng(0)
rng2 = np.random.default_rng(234832)
A = rng1.random((n, n))
# rngs in different states produce accurate, but not
# necessarily identical, results
res1a = svds(A, k, solver=self.solver, rng=rng1, maxiter=1000)
res2a = svds(A, k, solver=self.solver, rng=rng2, maxiter=1000)
_check_svds(A, k, *res1a, atol=2e-7)
_check_svds(A, k, *res2a, atol=2e-7)
message = "Arrays are not equal"
with pytest.raises(AssertionError, match=message):
assert_equal(res1a, res2a)
@pytest.mark.thread_unsafe
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
def test_svd_maxiter(self):
# check that maxiter works as expected: should not return accurate
# solution after 1 iteration, but should with default `maxiter`
A = np.diag(np.arange(9)).astype(np.float64)
k = 1
u, s, vh = sorted_svd(A, k)
# Use default maxiter by default
maxiter = None
if self.solver == 'arpack':
message = "ARPACK error -1: No convergence"
with pytest.raises(ArpackNoConvergence, match=message):
svds(A, k, ncv=3, maxiter=1, solver=self.solver, rng=0)
elif self.solver == 'lobpcg':
# Set maxiter higher so test passes without changing
# default and breaking backward compatibility (gh-20221)
maxiter = 30
with pytest.warns(UserWarning, match="Exited at iteration"):
svds(A, k, maxiter=1, solver=self.solver, rng=0)
elif self.solver == 'propack':
message = "k=1 singular triplets did not converge within"
with pytest.raises(np.linalg.LinAlgError, match=message):
svds(A, k, maxiter=1, solver=self.solver, rng=0)
ud, sd, vhd = svds(A, k, solver=self.solver, maxiter=maxiter, rng=0)
_check_svds(A, k, ud, sd, vhd, atol=1e-8)
assert_allclose(np.abs(ud), np.abs(u), atol=1e-8)
assert_allclose(np.abs(vhd), np.abs(vh), atol=1e-8)
assert_allclose(np.abs(sd), np.abs(s), atol=1e-9)
@pytest.mark.thread_unsafe
@pytest.mark.parametrize("rsv", (True, False, 'u', 'vh'))
@pytest.mark.parametrize("shape", ((5, 7), (6, 6), (7, 5)))
def test_svd_return_singular_vectors(self, rsv, shape):
# check that the return_singular_vectors parameter works as expected
rng = np.random.default_rng(0)
A = rng.random(shape)
k = 2
M, N = shape
u, s, vh = sorted_svd(A, k)
respect_u = True if self.solver == 'propack' else M <= N
respect_vh = True if self.solver == 'propack' else M > N
if self.solver == 'lobpcg':
with pytest.warns(UserWarning, match="The problem size"):
if rsv is False:
s2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, rng=rng)
assert_allclose(s2, s)
elif rsv == 'u' and respect_u:
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, rng=rng)
assert_allclose(np.abs(u2), np.abs(u))
assert_allclose(s2, s)
assert vh2 is None
elif rsv == 'vh' and respect_vh:
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, rng=rng)
assert u2 is None
assert_allclose(s2, s)
assert_allclose(np.abs(vh2), np.abs(vh))
else:
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, rng=rng)
if u2 is not None:
assert_allclose(np.abs(u2), np.abs(u))
assert_allclose(s2, s)
if vh2 is not None:
assert_allclose(np.abs(vh2), np.abs(vh))
else:
if rsv is False:
s2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, rng=rng)
assert_allclose(s2, s)
elif rsv == 'u' and respect_u:
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, rng=rng)
assert_allclose(np.abs(u2), np.abs(u))
assert_allclose(s2, s)
assert vh2 is None
elif rsv == 'vh' and respect_vh:
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, rng=rng)
assert u2 is None
assert_allclose(s2, s)
assert_allclose(np.abs(vh2), np.abs(vh))
else:
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
solver=self.solver, rng=rng)
if u2 is not None:
assert_allclose(np.abs(u2), np.abs(u))
assert_allclose(s2, s)
if vh2 is not None:
assert_allclose(np.abs(vh2), np.abs(vh))
# --- Test Basic Functionality ---
# Tests the accuracy of each solver for real and complex matrices provided
# as list, dense array, sparse matrix, and LinearOperator.
A1 = [[1, 2, 3], [3, 4, 3], [1 + 1j, 0, 2], [0, 0, 1]]
A2 = [[1, 2, 3, 8 + 5j], [3 - 2j, 4, 3, 5], [1, 0, 2, 3], [0, 0, 1, 0]]
@pytest.mark.thread_unsafe
@pytest.mark.filterwarnings("ignore:k >= N - 1",
reason="needed to demonstrate #16725")
@pytest.mark.parametrize('A', (A1, A2))
@pytest.mark.parametrize('k', range(1, 5))
# PROPACK fails a lot if @pytest.mark.parametrize('which', ("SM", "LM"))
@pytest.mark.parametrize('real', (True, False))
@pytest.mark.parametrize('transpose', (False, True))
# In gh-14299, it was suggested the `svds` should _not_ work with lists
@pytest.mark.parametrize('lo_type', (np.asarray, csc_array,
aslinearoperator))
def test_svd_simple(self, A, k, real, transpose, lo_type):
A = np.asarray(A)
A = np.real(A) if real else A
A = A.T if transpose else A
A2 = lo_type(A)
# could check for the appropriate errors, but that is tested above
if k > min(A.shape):
pytest.skip("`k` cannot be greater than `min(A.shape)`")
if self.solver != 'propack' and k >= min(A.shape):
pytest.skip("Only PROPACK supports complete SVD")
if self.solver == 'arpack' and not real and k == min(A.shape) - 1:
pytest.skip("#16725")
atol = 3e-10
if self.solver == 'propack':
atol = 3e-9 # otherwise test fails on Linux aarch64 (see gh-19855)
if self.solver == 'lobpcg':
with pytest.warns(UserWarning, match="The problem size"):
u, s, vh = svds(A2, k, solver=self.solver, rng=0)
else:
u, s, vh = svds(A2, k, solver=self.solver, rng=0)
_check_svds(A, k, u, s, vh, atol=atol)
@pytest.mark.thread_unsafe
def test_svd_linop(self):
solver = self.solver
nmks = [(6, 7, 3),
(9, 5, 4),
(10, 8, 5)]
def reorder(args):
U, s, VH = args
j = np.argsort(s)
return U[:, j], s[j], VH[j, :]
for n, m, k in nmks:
# Test svds on a LinearOperator.
A = np.random.RandomState(52).randn(n, m)
L = CheckingLinearOperator(A)
if solver == 'propack':
v0 = np.ones(n)
else:
v0 = np.ones(min(A.shape))
if solver == 'lobpcg':
with pytest.warns(UserWarning, match="The problem size"):
U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver, rng=0))
U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver, rng=0))
else:
U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver, rng=0))
U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver, rng=0))
assert_allclose(np.abs(U1), np.abs(U2))
assert_allclose(s1, s2)
assert_allclose(np.abs(VH1), np.abs(VH2))
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)))
# Try again with which="SM".
A = np.random.RandomState(1909).randn(n, m)
L = CheckingLinearOperator(A)
# TODO: arpack crashes when v0=v0, which="SM"
kwargs = {'v0': v0} if solver not in {None, 'arpack'} else {}
if self.solver == 'lobpcg':
with pytest.warns(UserWarning, match="The problem size"):
U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver,
rng=0, **kwargs))
U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver,
rng=0, **kwargs))
else:
U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver,
rng=0, **kwargs))
U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver,
rng=0, **kwargs))
assert_allclose(np.abs(U1), np.abs(U2))
assert_allclose(s1 + 1, s2 + 1)
assert_allclose(np.abs(VH1), np.abs(VH2))
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)))
if k < min(n, m) - 1:
# Complex input and explicit which="LM".
for (dt, eps) in [(complex, 1e-7), (np.complex64, 3e-3)]:
rng = np.random.RandomState(1648)
A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt)
L = CheckingLinearOperator(A)
if self.solver == 'lobpcg':
with pytest.warns(UserWarning,
match="The problem size"):
U1, s1, VH1 = reorder(svds(A, k, which="LM",
solver=solver, rng=0))
U2, s2, VH2 = reorder(svds(L, k, which="LM",
solver=solver, rng=0))
else:
U1, s1, VH1 = reorder(svds(A, k, which="LM",
solver=solver, rng=0))
U2, s2, VH2 = reorder(svds(L, k, which="LM",
solver=solver, rng=0))
assert_allclose(np.abs(U1), np.abs(U2), rtol=eps)
assert_allclose(s1, s2, rtol=eps)
assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps)
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)),
rtol=eps)
SHAPES = ((100, 100), (100, 101), (101, 100))
@pytest.mark.filterwarnings("ignore:Exited at iteration")
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
@pytest.mark.parametrize("shape", SHAPES)
# ARPACK supports only dtype float, complex, or np.float32
@pytest.mark.parametrize("dtype", (float, complex, np.float32))
def test_small_sigma_sparse(self, shape, dtype):
# https://github.com/scipy/scipy/pull/11829
solver = self.solver
# TODO: PROPACK fails orthogonality of singular vectors
# if dtype == complex and self.solver == 'propack':
# pytest.skip("PROPACK unsupported for complex dtype")
rng = np.random.default_rng(0)
k = 5
(m, n) = shape
S = random_array(shape=(m, n), density=0.1, rng=rng)
if dtype is complex:
S = S + 1j * random_array(shape=(m, n), density=0.1, rng=rng)
e = np.ones(m)
e[0:5] *= 1e1 ** np.arange(-5, 0, 1)
S = dia_array((e, 0), shape=(m, m)) @ S
S = S.astype(dtype)
u, s, vh = svds(S, k, which='SM', solver=solver, maxiter=1000, rng=0)
c_svd = False # partial SVD can be different from full SVD
_check_svds_n(S, k, u, s, vh, which="SM", check_svd=c_svd, atol=2e-1)
# --- Test Edge Cases ---
# Checks a few edge cases.
@pytest.mark.thread_unsafe
@pytest.mark.parametrize("shape", ((6, 5), (5, 5), (5, 6)))
@pytest.mark.parametrize("dtype", (float, complex))
def test_svd_LM_ones_matrix(self, shape, dtype):
# Check that svds can deal with matrix_rank less than k in LM mode.
k = 3
n, m = shape
A = np.ones((n, m), dtype=dtype)
if self.solver == 'lobpcg':
with pytest.warns(UserWarning, match="The problem size"):
U, s, VH = svds(A, k, solver=self.solver, rng=0)
else:
U, s, VH = svds(A, k, solver=self.solver, rng=0)
_check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)
# Check that the largest singular value is near sqrt(n*m)
# and the other singular values have been forced to zero.
assert_allclose(np.max(s), np.sqrt(n*m))
s = np.array(sorted(s)[:-1]) + 1
z = np.ones_like(s)
assert_allclose(s, z)
@pytest.mark.thread_unsafe
@pytest.mark.filterwarnings("ignore:k >= N - 1",
reason="needed to demonstrate #16725")
@pytest.mark.parametrize("shape", ((3, 4), (4, 4), (4, 3), (4, 2)))
@pytest.mark.parametrize("dtype", (float, complex))
def test_zero_matrix(self, shape, dtype):
# Check that svds can deal with matrices containing only zeros;
# see https://github.com/scipy/scipy/issues/3452/
# shape = (4, 2) is included because it is the particular case
# reported in the issue
k = 1
n, m = shape
A = np.zeros((n, m), dtype=dtype)
if (self.solver == 'arpack'):
pytest.skip('See gh-21110.')
if (self.solver == 'arpack' and dtype is complex
and k == min(A.shape) - 1):
pytest.skip("#16725")
if self.solver == 'propack':
pytest.skip("PROPACK failures unrelated to PR #16712")
if self.solver == 'lobpcg':
with pytest.warns(UserWarning, match="The problem size"):
U, s, VH = svds(A, k, solver=self.solver, rng=0)
else:
U, s, VH = svds(A, k, solver=self.solver, rng=0)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)
# Check that the singular values are zero.
assert_array_equal(s, 0)
@pytest.mark.parametrize("shape", ((20, 20), (20, 21), (21, 20)))
# ARPACK supports only dtype float, complex, or np.float32
@pytest.mark.parametrize("dtype", (float, complex, np.float32))
@pytest.mark.filterwarnings("ignore:Exited",
reason="Ignore LOBPCG early exit.")
def test_small_sigma(self, shape, dtype):
rng = np.random.default_rng(179847540)
A = rng.random(shape).astype(dtype)
u, _, vh = svd(A, full_matrices=False)
if dtype == np.float32:
e = 10.0
else:
e = 100.0
t = e**(-np.arange(len(vh))).astype(dtype)
A = (u*t).dot(vh)
k = 4
u, s, vh = svds(A, k, solver=self.solver, maxiter=100, rng=0)
t = np.sum(s > 0)
assert_equal(t, k)
# LOBPCG needs larger atol and rtol to pass
_check_svds_n(A, k, u, s, vh, atol=1e-3, rtol=1e0, check_svd=False)
# ARPACK supports only dtype float, complex, or np.float32
@pytest.mark.filterwarnings("ignore:The problem size")
@pytest.mark.parametrize("dtype", (float, complex, np.float32))
def test_small_sigma2(self, dtype):
rng = np.random.default_rng(179847540)
# create a 10x10 singular matrix with a 4-dim null space
dim = 4
size = 10
x = rng.random((size, size-dim))
y = x[:, :dim] * rng.random(dim)
mat = np.hstack((x, y))
mat = mat.astype(dtype)
nz = null_space(mat)
assert_equal(nz.shape[1], dim)
# Tolerances atol and rtol adjusted to pass np.float32
# Use non-sparse svd
u, s, vh = svd(mat)
# Singular values are 0:
assert_allclose(s[-dim:], 0, atol=1e-6, rtol=1e0)
# Smallest right singular vectors in null space:
assert_allclose(mat @ vh[-dim:, :].T, 0, atol=1e-6, rtol=1e0)
# Smallest singular values should be 0
sp_mat = csc_array(mat)
su, ss, svh = svds(sp_mat, k=dim, which='SM', solver=self.solver, rng=0)
# Smallest dim singular values are 0:
assert_allclose(ss, 0, atol=1e-5, rtol=1e0)
# Smallest singular vectors via svds in null space:
n, m = mat.shape
if n < m: # else the assert fails with some libraries unclear why
assert_allclose(sp_mat.transpose() @ su, 0, atol=1e-5, rtol=1e0)
assert_allclose(sp_mat @ svh.T, 0, atol=1e-5, rtol=1e0)
# --- Perform tests with each solver ---
class Test_SVDS_once:
@pytest.mark.parametrize("solver", ['ekki', object])
def test_svds_input_validation_solver(self, solver):
message = "solver must be one of"
with pytest.raises(ValueError, match=message):
svds(np.ones((3, 4)), k=2, solver=solver, rng=0)
class Test_SVDS_ARPACK(SVDSCommonTests):
def setup_method(self):
self.solver = 'arpack'
@pytest.mark.parametrize("ncv", list(range(-1, 8)) + [4.5, "5"])
def test_svds_input_validation_ncv_1(self, ncv):
rng = np.random.default_rng(0)
A = rng.random((6, 7))
k = 3
if ncv in {4, 5}:
u, s, vh = svds(A, k=k, ncv=ncv, solver=self.solver, rng=0)
# partial decomposition, so don't check that u@diag(s)@vh=A;
# do check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
_check_svds(A, k, u, s, vh)
else:
message = ("`ncv` must be an integer satisfying")
with pytest.raises(ValueError, match=message):
svds(A, k=k, ncv=ncv, solver=self.solver, rng=0)
def test_svds_input_validation_ncv_2(self):
# I think the stack trace is reasonable when `ncv` can't be converted
# to an int.
message = "int() argument must be a"
with pytest.raises(TypeError, match=re.escape(message)):
svds(np.eye(10), ncv=[], solver=self.solver, rng=0)
message = "invalid literal for int()"
with pytest.raises(ValueError, match=message):
svds(np.eye(10), ncv="hi", solver=self.solver, rng=0)
# I can't see a robust relationship between `ncv` and relevant outputs
# (e.g. accuracy, time), so no test of the parameter.
class Test_SVDS_LOBPCG(SVDSCommonTests):
def setup_method(self):
self.solver = 'lobpcg'
class Test_SVDS_PROPACK(SVDSCommonTests):
def setup_method(self):
self.solver = 'propack'
def test_svd_LM_ones_matrix(self):
message = ("PROPACK does not return orthonormal singular vectors "
"associated with zero singular values.")
# There are some other issues with this matrix of all ones, e.g.
# `which='sm'` and `k=1` returns the largest singular value
pytest.xfail(message)
def test_svd_LM_zeros_matrix(self):
message = ("PROPACK does not return orthonormal singular vectors "
"associated with zero singular values.")
pytest.xfail(message)

View file

@ -0,0 +1,816 @@
"""Compute the action of the matrix exponential."""
from warnings import warn
import numpy as np
import scipy.linalg
import scipy.sparse.linalg
from scipy.linalg._decomp_qr import qr
from scipy.sparse._sputils import is_pydata_spmatrix
from scipy.sparse.linalg import aslinearoperator
from scipy.sparse.linalg._interface import IdentityOperator
from scipy.sparse.linalg._onenormest import onenormest
__all__ = ['expm_multiply']
def _exact_inf_norm(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.issparse(A):
return max(abs(A).sum(axis=1).flat)
elif is_pydata_spmatrix(A):
return max(abs(A).sum(axis=1))
else:
return np.linalg.norm(A, np.inf)
def _exact_1_norm(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.issparse(A):
return max(abs(A).sum(axis=0).flat)
elif is_pydata_spmatrix(A):
return max(abs(A).sum(axis=0))
else:
return np.linalg.norm(A, 1)
def _trace(A):
# A compatibility function which should eventually disappear.
if is_pydata_spmatrix(A):
return A.to_scipy_sparse().trace()
else:
return A.trace()
def traceest(A, m3, seed=None):
"""Estimate `np.trace(A)` using `3*m3` matrix-vector products.
The result is not deterministic.
Parameters
----------
A : LinearOperator
Linear operator whose trace will be estimated. Has to be square.
m3 : int
Number of matrix-vector products divided by 3 used to estimate the
trace.
seed : optional
Seed for `numpy.random.default_rng`.
Can be provided to obtain deterministic results.
Returns
-------
trace : LinearOperator.dtype
Estimate of the trace
Notes
-----
This is the Hutch++ algorithm given in [1]_.
References
----------
.. [1] Meyer, Raphael A., Cameron Musco, Christopher Musco, and David P.
Woodruff. "Hutch++: Optimal Stochastic Trace Estimation." In Symposium
on Simplicity in Algorithms (SOSA), pp. 142-155. Society for Industrial
and Applied Mathematics, 2021
https://doi.org/10.1137/1.9781611976496.16
"""
rng = np.random.default_rng(seed)
if len(A.shape) != 2 or A.shape[-1] != A.shape[-2]:
raise ValueError("Expected A to be like a square matrix.")
n = A.shape[-1]
S = rng.choice([-1.0, +1.0], [n, m3])
Q, _ = qr(A.matmat(S), overwrite_a=True, mode='economic')
trQAQ = np.trace(Q.conj().T @ A.matmat(Q))
G = rng.choice([-1, +1], [n, m3])
right = G - Q@(Q.conj().T @ G)
trGAG = np.trace(right.conj().T @ A.matmat(right))
return trQAQ + trGAG/m3
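# A usage sketch for `traceest` (comment-only illustration, not module
# code; all names below are hypothetical):
#
#     rng = np.random.default_rng(0)
#     M = rng.standard_normal((200, 200))
#     A_dense = M @ M.T                    # SPD, a benign case for Hutch++
#     op = aslinearoperator(A_dense)
#     approx = traceest(op, m3=20, seed=0)
#     exact = np.trace(A_dense)
#     # |approx - exact| should be small relative to `exact` with high
#     # probability; accuracy improves as m3 grows.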
def _ident_like(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.issparse(A):
# Creates a sparse matrix in dia format
out = scipy.sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype)
if isinstance(A, scipy.sparse.spmatrix):
return out.asformat(A.format)
return scipy.sparse.dia_array(out).asformat(A.format)
elif is_pydata_spmatrix(A):
import sparse
return sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype)
elif isinstance(A, scipy.sparse.linalg.LinearOperator):
return IdentityOperator(A.shape, dtype=A.dtype)
else:
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
def expm_multiply(A, B, start=None, stop=None, num=None,
endpoint=None, traceA=None):
"""
Compute the action of the matrix exponential of A on B.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray, sparse array
The matrix or vector to be multiplied by the matrix exponential of A.
start : scalar, optional
The starting time point of the sequence.
stop : scalar, optional
The end time point of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced time points, so that `stop` is excluded.
Note that the step size changes when `endpoint` is False.
num : int, optional
Number of time points to use.
endpoint : bool, optional
If True, `stop` is the last time point. Otherwise, it is not included.
traceA : scalar, optional
Trace of `A`. If not given the trace is estimated for linear operators,
or calculated exactly for sparse matrices. It is used to precondition
`A`, thus an approximate trace is acceptable.
For linear operators, `traceA` should be provided to ensure performance
as the estimation is not guaranteed to be reliable for all cases.
.. versionadded:: 1.9.0
Returns
-------
expm_A_B : ndarray
The result of the action :math:`e^{t_k A} B`.
Warns
-----
UserWarning
If `A` is a linear operator and ``traceA=None`` (default).
Notes
-----
The optional arguments defining the sequence of evenly spaced time points
are compatible with the arguments of `numpy.linspace`.
The output ndarray shape is somewhat complicated so I explain it here.
The ndim of the output could be either 1, 2, or 3.
It would be 1 if you are computing the expm action on a single vector
at a single time point.
It would be 2 if you are computing the expm action on a vector
at multiple time points, or if you are computing the expm action
on a matrix at a single time point.
It would be 3 if you want the action on a matrix with multiple
columns at multiple time points.
If multiple time points are requested, expm_A_B[0] will always
be the action of the expm at the first time point,
regardless of whether the action is on a vector or a matrix.
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
"Computing the Action of the Matrix Exponential,
with an Application to Exponential Integrators."
SIAM Journal on Scientific Computing,
33 (2). pp. 488-511. ISSN 1064-8275
http://eprints.ma.man.ac.uk/1591/
.. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
"Computing Matrix Functions."
Acta Numerica,
19. 159-208. ISSN 0962-4929
http://eprints.ma.man.ac.uk/1451/
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import expm, expm_multiply
>>> A = csc_array([[1, 0], [0, 1]])
>>> A.toarray()
array([[1, 0],
[0, 1]], dtype=int64)
>>> B = np.array([np.exp(-1.), np.exp(-2.)])
>>> B
array([ 0.36787944, 0.13533528])
>>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True)
array([[ 1. , 0.36787944],
[ 1.64872127, 0.60653066],
[ 2.71828183, 1. ]])
>>> expm(A).dot(B) # Verify 1st timestep
array([ 1. , 0.36787944])
>>> expm(1.5*A).dot(B) # Verify 2nd timestep
array([ 1.64872127, 0.60653066])
>>> expm(2*A).dot(B) # Verify 3rd timestep
array([ 2.71828183, 1. ])
"""
if all(arg is None for arg in (start, stop, num, endpoint)):
X = _expm_multiply_simple(A, B, traceA=traceA)
else:
X, status = _expm_multiply_interval(A, B, start, stop, num,
endpoint, traceA=traceA)
return X
def _expm_multiply_simple(A, B, t=1.0, traceA=None, balance=False):
"""
Compute the action of the matrix exponential at a single time point.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray
The matrix to be multiplied by the matrix exponential of A.
t : float
A time point.
traceA : scalar, optional
Trace of `A`. If not given the trace is estimated for linear operators,
or calculated exactly for sparse matrices. It is used to precondition
`A`, thus an approximate trace is acceptable
balance : bool
Indicates whether or not to apply balancing.
Returns
-------
F : ndarray
:math:`e^{t A} B`
Notes
-----
This is algorithm (3.2) in Al-Mohy and Higham (2011).
"""
if balance:
raise NotImplementedError
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if A.shape[1] != B.shape[0]:
raise ValueError(f'shapes of matrices A {A.shape} and B {B.shape}'
' are incompatible')
ident = _ident_like(A)
is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator)
n = A.shape[0]
if len(B.shape) == 1:
n0 = 1
elif len(B.shape) == 2:
n0 = B.shape[1]
else:
raise ValueError('expected B to be like a matrix or a vector')
u_d = 2**-53
tol = u_d
if traceA is None:
if is_linear_operator:
warn("Trace of LinearOperator not available, it will be estimated."
" Provide `traceA` to ensure performance.", stacklevel=3)
# m3=1 is a somewhat arbitrary choice; a more accurate trace (larger m3)
# might speed up the exponential calculation, but trace estimation is
# more costly.
traceA = traceest(A, m3=1) if is_linear_operator else _trace(A)
mu = traceA / float(n)
A = A - mu * ident
A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A)
if t*A_1_norm == 0:
m_star, s = 0, 1
else:
ell = 2
norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)
def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
"""
A helper function.
"""
if balance:
raise NotImplementedError
if tol is None:
u_d = 2 ** -53
tol = u_d
F = B
eta = np.exp(t*mu / float(s))
for i in range(s):
c1 = _exact_inf_norm(B)
for j in range(m_star):
coeff = t / float(s*(j+1))
B = coeff * A.dot(B)
c2 = _exact_inf_norm(B)
F = F + B
if c1 + c2 <= tol * _exact_inf_norm(F):
break
c1 = c2
F = eta * F
B = F
return F
# This table helps to compute bounds.
# They seem to have been difficult to calculate, involving symbolic
# manipulation of equations, followed by numerical root finding.
_theta = {
# The first 30 values are from table A.3 of Computing Matrix Functions.
1: 2.29e-16,
2: 2.58e-8,
3: 1.39e-5,
4: 3.40e-4,
5: 2.40e-3,
6: 9.07e-3,
7: 2.38e-2,
8: 5.00e-2,
9: 8.96e-2,
10: 1.44e-1,
# 11
11: 2.14e-1,
12: 3.00e-1,
13: 4.00e-1,
14: 5.14e-1,
15: 6.41e-1,
16: 7.81e-1,
17: 9.31e-1,
18: 1.09,
19: 1.26,
20: 1.44,
# 21
21: 1.62,
22: 1.82,
23: 2.01,
24: 2.22,
25: 2.43,
26: 2.64,
27: 2.86,
28: 3.08,
29: 3.31,
30: 3.54,
# The rest are from table 3.1 of
# Computing the Action of the Matrix Exponential.
35: 4.7,
40: 6.0,
45: 7.2,
50: 8.5,
55: 9.9,
}
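# Usage sketch (an illustration, not part of the published tables): for a
# degree-m truncated Taylor series, _theta[m] is the largest 1-norm for which
# single-step double-precision accuracy holds, so the scaling parameter is
# s = ceil(||A||_1 / _theta[m]), as used in _fragment_3_1 below.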
def _onenormest_matrix_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False):
"""
Efficiently estimate the 1-norm of A^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
#XXX Eventually turn this into an API function in the _onenormest module,
#XXX and remove its underscore,
#XXX but wait until expm_multiply goes into scipy.
from scipy.sparse.linalg._onenormest import onenormest
return onenormest(aslinearoperator(A) ** p)
class LazyOperatorNormInfo:
"""
Information about an operator is lazily computed.
The information includes the exact 1-norm of the operator,
in addition to estimates of 1-norms of powers of the operator.
This uses the notation of Computing the Action (2011).
This class is specialized enough to probably not be of general interest
outside of this module.
"""
def __init__(self, A, A_1_norm=None, ell=2, scale=1):
"""
Provide the operator and some norm-related information.
Parameters
----------
A : linear operator
The operator of interest.
A_1_norm : float, optional
The exact 1-norm of A.
ell : int, optional
A technical parameter controlling norm estimation quality.
scale : int, optional
If specified, return the norms of scale*A instead of A.
"""
self._A = A
self._A_1_norm = A_1_norm
self._ell = ell
self._d = {}
self._scale = scale
def set_scale(self, scale):
"""
Set the scale parameter.
"""
self._scale = scale
def onenorm(self):
"""
Compute the exact 1-norm.
"""
if self._A_1_norm is None:
self._A_1_norm = _exact_1_norm(self._A)
return self._scale*self._A_1_norm
def d(self, p):
"""
Lazily estimate :math:`d_p(A) \approx \|A^p\|^{1/p}`,
where :math:`\|\cdot\|` is the 1-norm.
"""
if p not in self._d:
est = _onenormest_matrix_power(self._A, p, self._ell)
self._d[p] = est ** (1.0 / p)
return self._scale*self._d[p]
def alpha(self, p):
"""
Lazily compute max(d(p), d(p+1)).
"""
return max(self.d(p), self.d(p+1))
def _compute_cost_div_m(m, p, norm_info):
"""
A helper function for computing bounds.
This is equation (3.10).
It measures cost in terms of the number of required matrix products.
Parameters
----------
m : int
A valid key of _theta.
p : int
A matrix power.
norm_info : LazyOperatorNormInfo
Information about 1-norms of related operators.
Returns
-------
cost_div_m : int
Required number of matrix products divided by m.
"""
return int(np.ceil(norm_info.alpha(p) / _theta[m]))
def _compute_p_max(m_max):
"""
Compute the largest positive integer p such that p*(p-1) <= m_max + 1.
Do this in a slightly dumb way, but safe and not too slow.
Parameters
----------
m_max : int
A count related to bounds.
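Examples
--------
>>> _compute_p_max(55)  # 8*7 = 56 <= 55 + 1, while 9*8 = 72 > 56
8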
"""
sqrt_m_max = np.sqrt(m_max)
p_low = int(np.floor(sqrt_m_max))
p_high = int(np.ceil(sqrt_m_max + 1))
return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
"""
A helper function for the _expm_multiply_* functions.
Parameters
----------
norm_info : LazyOperatorNormInfo
Information about norms of certain linear operators of interest.
n0 : int
Number of columns in the _expm_multiply_* B matrix.
tol : float
Expected to be
:math:`2^{-24}` for single precision or
:math:`2^{-53}` for double precision.
m_max : int
A value related to a bound.
ell : int
The number of columns used in the 1-norm approximation.
This is usually taken to be small, maybe between 1 and 5.
Returns
-------
best_m : int
Related to bounds for error control.
best_s : int
Amount of scaling.
Notes
-----
This is code fragment (3.1) in Al-Mohy and Higham (2011).
The discussion of default values for m_max and ell
is given between the definitions of equation (3.11)
and the definition of equation (3.12).
"""
if ell < 1:
raise ValueError('expected ell to be a positive integer')
best_m = None
best_s = None
if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
for m, theta in _theta.items():
s = int(np.ceil(norm_info.onenorm() / theta))
if best_m is None or m * s < best_m * best_s:
best_m = m
best_s = s
else:
# Equation (3.11).
for p in range(2, _compute_p_max(m_max) + 1):
for m in range(p*(p-1)-1, m_max+1):
if m in _theta:
s = _compute_cost_div_m(m, p, norm_info)
if best_m is None or m * s < best_m * best_s:
best_m = m
best_s = s
best_s = max(best_s, 1)
return best_m, best_s
def _condition_3_13(A_1_norm, n0, m_max, ell):
"""
A helper function for the _expm_multiply_* functions.
Parameters
----------
A_1_norm : float
The precomputed 1-norm of A.
n0 : int
Number of columns in the _expm_multiply_* B matrix.
m_max : int
A value related to a bound.
ell : int
The number of columns used in the 1-norm approximation.
This is usually taken to be small, maybe between 1 and 5.
Returns
-------
value : bool
Indicates whether or not the condition has been met.
Notes
-----
This is condition (3.13) in Al-Mohy and Higham (2011).
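Concretely, reading off the code below, the test is
``A_1_norm <= 2 * ell * p_max * (p_max + 3) * _theta[m_max] / (n0 * m_max)``.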
"""
# This is the rhs of equation (3.12).
p_max = _compute_p_max(m_max)
a = 2 * ell * p_max * (p_max + 3)
# Evaluate the condition (3.13).
b = _theta[m_max] / float(n0 * m_max)
return A_1_norm <= a * b
def _expm_multiply_interval(A, B, start=None, stop=None, num=None,
endpoint=None, traceA=None, balance=False,
status_only=False):
"""
Compute the action of the matrix exponential at multiple time points.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray
The matrix to be multiplied by the matrix exponential of A.
start : scalar, optional
The starting time point of the sequence.
stop : scalar, optional
The end time point of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced time points, so that `stop` is excluded.
Note that the step size changes when `endpoint` is False.
num : int, optional
Number of time points to use.
traceA : scalar, optional
Trace of `A`. If not given the trace is estimated for linear operators,
or calculated exactly for sparse matrices. It is used to precondition
`A`, thus an approximate trace is acceptable
endpoint : bool, optional
If True, `stop` is the last time point. Otherwise, it is not included.
balance : bool
Indicates whether or not to apply balancing.
status_only : bool
A flag that is set to True for some debugging and testing operations.
Returns
-------
F : ndarray
:math:`e^{t_k A} B`
status : int
An integer status for testing and debugging.
Notes
-----
This is algorithm (5.2) in Al-Mohy and Higham (2011).
There seems to be a typo, where line 15 of the algorithm should be
moved to line 6.5 (between lines 6 and 7).
"""
if balance:
raise NotImplementedError
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if A.shape[1] != B.shape[0]:
raise ValueError(f'shapes of matrices A {A.shape} and B {B.shape}'
' are incompatible')
ident = _ident_like(A)
is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator)
n = A.shape[0]
if len(B.shape) == 1:
n0 = 1
elif len(B.shape) == 2:
n0 = B.shape[1]
else:
raise ValueError('expected B to be like a matrix or a vector')
u_d = 2**-53
tol = u_d
if traceA is None:
if is_linear_operator:
warn("Trace of LinearOperator not available, it will be estimated."
" Provide `traceA` to ensure performance.", stacklevel=3)
# m3=5 is a somewhat arbitrary choice; a more accurate trace (larger m3)
# might speed up the exponential calculation, but trace estimation is also
# costly; an educated guess would need to consider the number of time points.
traceA = traceest(A, m3=5) if is_linear_operator else _trace(A)
mu = traceA / float(n)
# Get the linspace samples, attempting to preserve the linspace defaults.
linspace_kwargs = {'retstep': True}
if num is not None:
linspace_kwargs['num'] = num
if endpoint is not None:
linspace_kwargs['endpoint'] = endpoint
samples, step = np.linspace(start, stop, **linspace_kwargs)
# Convert the linspace output to the notation used by the publication.
nsamples = len(samples)
if nsamples < 2:
raise ValueError('at least two time points are required')
q = nsamples - 1
h = step
t_0 = samples[0]
t_q = samples[q]
# Define the output ndarray.
# Use an ndim=3 shape, such that the last two indices
# are the ones that may be involved in level 3 BLAS operations.
X_shape = (nsamples,) + B.shape
X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float))
t = t_q - t_0
A = A - mu * ident
A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A)
ell = 2
norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
if t*A_1_norm == 0:
m_star, s = 0, 1
else:
m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
# Compute the expm action up to the initial time point.
action_t0 = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)
if scipy.sparse.issparse(action_t0):
action_t0 = action_t0.toarray()
elif is_pydata_spmatrix(action_t0):
action_t0 = action_t0.todense()
X[0] = action_t0
# Compute the expm action at the rest of the time points.
if q <= s:
if status_only:
return 0
else:
return _expm_multiply_interval_core_0(A, X,
h, mu, q, norm_info, tol, ell, n0)
elif not (q % s):
if status_only:
return 1
else:
return _expm_multiply_interval_core_1(A, X,
h, mu, m_star, s, q, tol)
elif (q % s):
if status_only:
return 2
else:
return _expm_multiply_interval_core_2(A, X,
h, mu, m_star, s, q, tol)
else:
raise Exception('internal error')
def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
"""
A helper function, for the case q <= s.
"""
# Compute the new values of m_star and s which should be applied
# over intervals of size t/q
if norm_info.onenorm() == 0:
m_star, s = 0, 1
else:
norm_info.set_scale(1./q)
m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
norm_info.set_scale(1)
for k in range(q):
X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s)
return X, 0
def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):
"""
A helper function, for the case q > s and q % s == 0.
"""
d = q // s
input_shape = X.shape[1:]
K_shape = (m_star + 1, ) + input_shape
K = np.empty(K_shape, dtype=X.dtype)
for i in range(s):
Z = X[i*d]
K[0] = Z
high_p = 0
for k in range(1, d+1):
F = K[0]
c1 = _exact_inf_norm(F)
for p in range(1, m_star+1):
if p > high_p:
K[p] = h * A.dot(K[p-1]) / float(p)
coeff = float(pow(k, p))
F = F + coeff * K[p]
inf_norm_K_p_1 = _exact_inf_norm(K[p])
c2 = coeff * inf_norm_K_p_1
if c1 + c2 <= tol * _exact_inf_norm(F):
break
c1 = c2
X[k + i*d] = np.exp(k*h*mu) * F
return X, 1
def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):
"""
A helper function, for the case q > s and q % s > 0.
"""
d = q // s
j = q // d
r = q - d * j
input_shape = X.shape[1:]
K_shape = (m_star + 1, ) + input_shape
K = np.empty(K_shape, dtype=X.dtype)
for i in range(j + 1):
Z = X[i*d]
K[0] = Z
high_p = 0
if i < j:
effective_d = d
else:
effective_d = r
for k in range(1, effective_d+1):
F = K[0]
c1 = _exact_inf_norm(F)
for p in range(1, m_star+1):
if p == high_p + 1:
K[p] = h * A.dot(K[p-1]) / float(p)
high_p = p
coeff = float(pow(k, p))
F = F + coeff * K[p]
inf_norm_K_p_1 = _exact_inf_norm(K[p])
c2 = coeff * inf_norm_K_p_1
if c1 + c2 <= tol * _exact_inf_norm(F):
break
c1 = c2
X[k + i*d] = np.exp(k*h*mu) * F
return X, 2

@@ -0,0 +1,920 @@
"""Abstract linear algebra library.
This module defines a class hierarchy that implements a kind of "lazy"
matrix representation, called the ``LinearOperator``. It can be used to do
linear algebra with extremely large sparse or structured matrices, without
representing those explicitly in memory. Such matrices can be added,
multiplied, transposed, etc.
As a motivating example, suppose you want a matrix where almost all of
the elements have the value one. The standard sparse matrix representation
skips the storage of zeros, but not ones. By contrast, a LinearOperator is
able to represent such matrices efficiently. First, we need a compact way to
represent an all-ones matrix::
>>> import numpy as np
>>> from scipy.sparse.linalg._interface import LinearOperator
>>> class Ones(LinearOperator):
... def __init__(self, shape):
... super().__init__(dtype=None, shape=shape)
... def _matvec(self, x):
... return np.repeat(x.sum(), self.shape[0])
Instances of this class emulate ``np.ones(shape)``, but using a constant
amount of storage, independent of ``shape``. The ``_matvec`` method specifies
how this linear operator multiplies with (operates on) a vector. We can now
add this operator to a sparse matrix that stores only offsets from one::
>>> from scipy.sparse.linalg._interface import aslinearoperator
>>> from scipy.sparse import csr_array
>>> offsets = csr_array([[1, 0, 2], [0, -1, 0], [0, 0, 3]])
>>> A = aslinearoperator(offsets) + Ones(offsets.shape)
>>> A.dot([1, 2, 3])
array([13, 4, 15])
The result is the same as that given by its dense, explicitly-stored
counterpart::
>>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3])
array([13, 4, 15])
Several algorithms in the ``scipy.sparse`` library are able to operate on
``LinearOperator`` instances.
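For instance, such an operator can be handed directly to an iterative
solver (a minimal sketch; `gmres` and the right-hand side are illustrative):

>>> from scipy.sparse.linalg import gmres
>>> x, info = gmres(A, np.array([13., 4., 15.]))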
"""
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy.sparse._sputils import isshape, isintlike, asmatrix, is_pydata_spmatrix
__all__ = ['LinearOperator', 'aslinearoperator']
class LinearOperator:
"""Common interface for performing matrix vector products
Many iterative methods (e.g. `cg`, `gmres`) do not need to know the
individual entries of a matrix to solve a linear system ``A@x = b``.
Such solvers only require the computation of matrix vector
products, ``A@v`` where ``v`` is a dense vector. This class serves as
an abstract interface between iterative solvers and matrix-like
objects.
To construct a concrete `LinearOperator`, either pass appropriate
callables to the constructor of this class, or subclass it.
A subclass must implement either one of the methods ``_matvec``
and ``_matmat``, and the attributes/properties ``shape`` (pair of
integers) and ``dtype`` (may be None). It may call the ``__init__``
on this class to have these attributes validated. Implementing
``_matvec`` automatically implements ``_matmat`` (using a naive
algorithm) and vice-versa.
Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
to implement the Hermitian adjoint (conjugate transpose). As with
``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
``_adjoint`` implements the other automatically. Implementing
``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
backwards compatibility.
Parameters
----------
shape : tuple
Matrix dimensions ``(M, N)``.
matvec : callable f(v)
Returns ``A @ v``.
rmatvec : callable f(v)
Returns ``A^H @ v``, where ``A^H`` is the conjugate transpose of ``A``.
matmat : callable f(V)
Returns ``A @ V``, where ``V`` is a dense matrix with dimensions ``(N, K)``.
dtype : dtype
Data type of the matrix.
rmatmat : callable f(V)
Returns ``A^H @ V``, where ``V`` is a dense matrix with dimensions ``(M, K)``.
Attributes
----------
args : tuple
For linear operators describing products etc. of other linear
operators, the operands of the binary operation.
ndim : int
Number of dimensions (this is always 2)
See Also
--------
aslinearoperator : Construct LinearOperators
Notes
-----
The user-defined `matvec` function must properly handle the case
where ``v`` has shape ``(N,)`` as well as the ``(N,1)`` case. The shape of
the return type is handled internally by `LinearOperator`.
It is highly recommended to explicitly specify the `dtype`, otherwise
it is determined automatically at the cost of a single matvec application
on ``int8`` zero vector using the promoted `dtype` of the output.
A Python ``int`` returned by `matvec` may not cast cleanly to a NumPy
integer type, so the automatic determination can be inaccurate.
It is assumed that `matmat`, `rmatvec`, and `rmatmat` would result in
the same dtype of the output given an ``int8`` input as `matvec`.
LinearOperator instances can also be multiplied, added with each
other and exponentiated, all lazily: the result of these operations
is always a new, composite LinearOperator, that defers linear
operations to the original operators and combines the results.
More details regarding how to subclass a LinearOperator and several
examples of concrete LinearOperator instances can be found in the
external project `PyLops <https://pylops.readthedocs.io>`_.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import LinearOperator
>>> def mv(v):
... return np.array([2*v[0], 3*v[1]])
...
>>> A = LinearOperator((2,2), matvec=mv)
>>> A
<2x2 _CustomLinearOperator with dtype=int8>
>>> A.matvec(np.ones(2))
array([ 2., 3.])
>>> A @ np.ones(2)
array([ 2., 3.])
"""
ndim = 2
# Necessary for right matmul with numpy arrays.
__array_ufunc__ = None
def __new__(cls, *args, **kwargs):
if cls is LinearOperator:
# Operate as _CustomLinearOperator factory.
return super().__new__(_CustomLinearOperator)
else:
obj = super().__new__(cls)
if (type(obj)._matvec == LinearOperator._matvec
and type(obj)._matmat == LinearOperator._matmat):
warnings.warn("LinearOperator subclass should implement"
" at least one of _matvec and _matmat.",
category=RuntimeWarning, stacklevel=2)
return obj
def __init__(self, dtype, shape):
"""Initialize this LinearOperator.
To be called by subclasses. ``dtype`` may be None; ``shape`` should
be convertible to a length-2 tuple.
"""
if dtype is not None:
dtype = np.dtype(dtype)
shape = tuple(shape)
if not isshape(shape):
raise ValueError(f"invalid shape {shape!r} (must be 2-d)")
self.dtype = dtype
self.shape = shape
def _init_dtype(self):
"""Determine the dtype by executing `matvec` on an `int8` test vector.
In `np.promote_types` hierarchy, the type `int8` is the smallest,
so we call `matvec` on `int8` and use the promoted dtype of the output
to set the default `dtype` of the `LinearOperator`.
We assume that `matmat`, `rmatvec`, and `rmatmat` would result in
the same dtype of the output given an `int8` input as `matvec`.
Called from subclasses at the end of the __init__ routine.
"""
if self.dtype is None:
v = np.zeros(self.shape[-1], dtype=np.int8)
try:
matvec_v = np.asarray(self.matvec(v))
except OverflowError:
# A large Python `int` is promoted to `np.int64` or `np.int32`
self.dtype = np.dtype(int)
else:
self.dtype = matvec_v.dtype
def _matmat(self, X):
"""Default matrix-matrix multiplication handler.
Falls back on the user-defined _matvec method, so defining that will
define matrix multiplication (though in a very suboptimal way).
"""
return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T])
def _matvec(self, x):
"""Default matrix-vector multiplication handler.
If self is a linear operator of shape (M, N), then this method will
be called on a shape (N,) or (N, 1) ndarray, and should return a
shape (M,) or (M, 1) ndarray.
This default implementation falls back on _matmat, so defining that
will define matrix-vector multiplication as well.
"""
return self.matmat(x.reshape(-1, 1))
def matvec(self, x):
"""Matrix-vector multiplication.
Performs the operation y=A@x where A is an MxN linear
operator and x is a column vector or 1-d array.
Parameters
----------
x : {matrix, ndarray}
An array with shape (N,) or (N,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (M,) or (M,1) depending
on the type and shape of the x argument.
Notes
-----
This matvec wraps the user-specified matvec routine or overridden
_matvec method to ensure that y has the correct shape and type.
"""
x = np.asanyarray(x)
M,N = self.shape
if x.shape != (N,) and x.shape != (N,1):
raise ValueError('dimension mismatch')
y = self._matvec(x)
if isinstance(x, np.matrix):
y = asmatrix(y)
else:
y = np.asarray(y)
if x.ndim == 1:
y = y.reshape(M)
elif x.ndim == 2:
y = y.reshape(M,1)
else:
raise ValueError('invalid shape returned by user-defined matvec()')
return y
def rmatvec(self, x):
"""Adjoint matrix-vector multiplication.
Performs the operation y = A^H @ x where A is an MxN linear
operator and x is a column vector or 1-d array.
Parameters
----------
x : {matrix, ndarray}
An array with shape (M,) or (M,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (N,) or (N,1) depending
on the type and shape of the x argument.
Notes
-----
This rmatvec wraps the user-specified rmatvec routine or overridden
_rmatvec method to ensure that y has the correct shape and type.
"""
x = np.asanyarray(x)
M,N = self.shape
if x.shape != (M,) and x.shape != (M,1):
raise ValueError('dimension mismatch')
y = self._rmatvec(x)
if isinstance(x, np.matrix):
y = asmatrix(y)
else:
y = np.asarray(y)
if x.ndim == 1:
y = y.reshape(N)
elif x.ndim == 2:
y = y.reshape(N,1)
else:
raise ValueError('invalid shape returned by user-defined rmatvec()')
return y
def _rmatvec(self, x):
"""Default implementation of _rmatvec; defers to adjoint."""
if type(self)._adjoint == LinearOperator._adjoint:
# _adjoint not overridden, prevent infinite recursion
if (hasattr(self, "_rmatmat")
and type(self)._rmatmat != LinearOperator._rmatmat):
# Try to use _rmatmat as a fallback
return self._rmatmat(x.reshape(-1, 1)).reshape(-1)
raise NotImplementedError
else:
return self.H.matvec(x)
def matmat(self, X):
"""Matrix-matrix multiplication.
Performs the operation ``Y = A @ X`` where A is an MxN linear
operator and X is a dense matrix or ndarray of shape (N, K).
Parameters
----------
X : {matrix, ndarray}
An array with shape (N,K).
Returns
-------
Y : {matrix, ndarray}
A matrix or ndarray with shape (M,K) depending on
the type of the X argument.
Notes
-----
This matmat wraps any user-specified matmat routine or overridden
_matmat method to ensure that y has the correct type.
"""
if not (issparse(X) or is_pydata_spmatrix(X)):
X = np.asanyarray(X)
if X.ndim != 2:
raise ValueError(f'expected 2-d ndarray or matrix, not {X.ndim}-d')
if X.shape[0] != self.shape[1]:
raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}')
try:
Y = self._matmat(X)
except Exception as e:
if issparse(X) or is_pydata_spmatrix(X):
raise TypeError(
"Unable to multiply a LinearOperator with a sparse matrix."
" Wrap the matrix in aslinearoperator first."
) from e
raise
if isinstance(Y, np.matrix):
Y = asmatrix(Y)
return Y
def rmatmat(self, X):
"""Adjoint matrix-matrix multiplication.
Performs the operation ``Y = A^H @ X`` where A is an MxN linear
operator and X is a dense matrix or 2-d array of shape (M, K).
The default implementation defers to the adjoint.
Parameters
----------
X : {matrix, ndarray}
A matrix or 2D array.
Returns
-------
Y : {matrix, ndarray}
A matrix or 2D array depending on the type of the input.
Notes
-----
This rmatmat wraps the user-specified rmatmat routine.
"""
if not (issparse(X) or is_pydata_spmatrix(X)):
X = np.asanyarray(X)
if X.ndim != 2:
raise ValueError(f'expected 2-d ndarray or matrix, not {X.ndim}-d')
if X.shape[0] != self.shape[0]:
raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}')
try:
Y = self._rmatmat(X)
except Exception as e:
if issparse(X) or is_pydata_spmatrix(X):
raise TypeError(
"Unable to multiply a LinearOperator with a sparse matrix."
" Wrap the matrix in aslinearoperator() first."
) from e
raise
if isinstance(Y, np.matrix):
Y = asmatrix(Y)
return Y
def _rmatmat(self, X):
"""Default implementation of _rmatmat defers to rmatvec or adjoint."""
if type(self)._adjoint == LinearOperator._adjoint:
return np.hstack([self.rmatvec(col.reshape(-1, 1)) for col in X.T])
else:
return self.H.matmat(X)
def __call__(self, x):
return self@x
def __mul__(self, x):
return self.dot(x)
def __truediv__(self, other):
if not np.isscalar(other):
raise ValueError("Can only divide a linear operator by a scalar.")
return _ScaledLinearOperator(self, 1.0/other)
def dot(self, x):
"""Matrix-matrix or matrix-vector multiplication.
Parameters
----------
x : array_like
1-d or 2-d array, representing a vector or matrix.
Returns
-------
Ax : array
1-d or 2-d array (depending on the shape of x) that represents
the result of applying this linear operator on x.
"""
if isinstance(x, LinearOperator):
return _ProductLinearOperator(self, x)
elif np.isscalar(x):
return _ScaledLinearOperator(self, x)
else:
if not issparse(x) and not is_pydata_spmatrix(x):
# Sparse matrices shouldn't be converted to numpy arrays.
x = np.asarray(x)
if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1:
return self.matvec(x)
elif x.ndim == 2:
return self.matmat(x)
else:
raise ValueError(f'expected 1-d or 2-d array or matrix, got {x!r}')
def __matmul__(self, other):
if np.isscalar(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self.__mul__(other)
def __rmatmul__(self, other):
if np.isscalar(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self.__rmul__(other)
def __rmul__(self, x):
if np.isscalar(x):
return _ScaledLinearOperator(self, x)
else:
return self._rdot(x)
def _rdot(self, x):
"""Matrix-matrix or matrix-vector multiplication from the right.
Parameters
----------
x : array_like
1-d or 2-d array, representing a vector or matrix.
Returns
-------
xA : array
1-d or 2-d array (depending on the shape of x) that represents
the result of applying this linear operator on x from the right.
Notes
-----
This is copied from dot to implement right multiplication.
"""
if isinstance(x, LinearOperator):
return _ProductLinearOperator(x, self)
elif np.isscalar(x):
return _ScaledLinearOperator(self, x)
else:
if not issparse(x) and not is_pydata_spmatrix(x):
# Sparse matrices shouldn't be converted to numpy arrays.
x = np.asarray(x)
# We use transpose instead of rmatvec/rmatmat to avoid
# unnecessary complex conjugation if possible.
if x.ndim == 1 or x.ndim == 2 and x.shape[0] == 1:
return self.T.matvec(x.T).T
elif x.ndim == 2:
return self.T.matmat(x.T).T
else:
raise ValueError(f'expected 1-d or 2-d array or matrix, got {x!r}')
def __pow__(self, p):
if np.isscalar(p):
return _PowerLinearOperator(self, p)
else:
return NotImplemented
def __add__(self, x):
if isinstance(x, LinearOperator):
return _SumLinearOperator(self, x)
else:
return NotImplemented
def __neg__(self):
return _ScaledLinearOperator(self, -1)
def __sub__(self, x):
return self.__add__(-x)
def __repr__(self):
M,N = self.shape
if self.dtype is None:
dt = 'unspecified dtype'
else:
dt = 'dtype=' + str(self.dtype)
return f'<{M}x{N} {self.__class__.__name__} with {dt}>'
def adjoint(self):
"""Hermitian adjoint.
Returns the Hermitian adjoint of self, aka the Hermitian
conjugate or Hermitian transpose. For a complex matrix, the
Hermitian adjoint is equal to the conjugate transpose.
Can be abbreviated self.H instead of self.adjoint().
Returns
-------
A_H : LinearOperator
Hermitian adjoint of self.
"""
return self._adjoint()
H = property(adjoint)
def transpose(self):
"""Transpose this linear operator.
Returns a LinearOperator that represents the transpose of this one.
Can be abbreviated self.T instead of self.transpose().
"""
return self._transpose()
T = property(transpose)
def _adjoint(self):
"""Default implementation of _adjoint; defers to rmatvec."""
return _AdjointLinearOperator(self)
def _transpose(self):
""" Default implementation of _transpose; defers to rmatvec + conj"""
return _TransposedLinearOperator(self)
class _CustomLinearOperator(LinearOperator):
"""Linear operator defined in terms of user-specified operations."""
def __init__(self, shape, matvec, rmatvec=None, matmat=None,
dtype=None, rmatmat=None):
super().__init__(dtype, shape)
self.args = ()
self.__matvec_impl = matvec
self.__rmatvec_impl = rmatvec
self.__rmatmat_impl = rmatmat
self.__matmat_impl = matmat
self._init_dtype()
def _matmat(self, X):
if self.__matmat_impl is not None:
return self.__matmat_impl(X)
else:
return super()._matmat(X)
def _matvec(self, x):
return self.__matvec_impl(x)
def _rmatvec(self, x):
func = self.__rmatvec_impl
if func is None:
raise NotImplementedError("rmatvec is not defined")
return self.__rmatvec_impl(x)
def _rmatmat(self, X):
if self.__rmatmat_impl is not None:
return self.__rmatmat_impl(X)
else:
return super()._rmatmat(X)
def _adjoint(self):
return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]),
matvec=self.__rmatvec_impl,
rmatvec=self.__matvec_impl,
matmat=self.__rmatmat_impl,
rmatmat=self.__matmat_impl,
dtype=self.dtype)
class _AdjointLinearOperator(LinearOperator):
"""Adjoint of arbitrary Linear Operator"""
def __init__(self, A):
shape = (A.shape[1], A.shape[0])
super().__init__(dtype=A.dtype, shape=shape)
self.A = A
self.args = (A,)
def _matvec(self, x):
return self.A._rmatvec(x)
def _rmatvec(self, x):
return self.A._matvec(x)
def _matmat(self, x):
return self.A._rmatmat(x)
def _rmatmat(self, x):
return self.A._matmat(x)
class _TransposedLinearOperator(LinearOperator):
"""Transposition of arbitrary Linear Operator"""
def __init__(self, A):
shape = (A.shape[1], A.shape[0])
super().__init__(dtype=A.dtype, shape=shape)
self.A = A
self.args = (A,)
def _matvec(self, x):
# NB. np.conj works also on sparse matrices
return np.conj(self.A._rmatvec(np.conj(x)))
def _rmatvec(self, x):
return np.conj(self.A._matvec(np.conj(x)))
def _matmat(self, x):
# NB. np.conj works also on sparse matrices
return np.conj(self.A._rmatmat(np.conj(x)))
def _rmatmat(self, x):
return np.conj(self.A._matmat(np.conj(x)))
def _get_dtype(operators, dtypes=None):
if dtypes is None:
dtypes = []
for obj in operators:
if obj is not None and hasattr(obj, 'dtype'):
dtypes.append(obj.dtype)
return np.result_type(*dtypes)
class _SumLinearOperator(LinearOperator):
def __init__(self, A, B):
if not isinstance(A, LinearOperator) or \
not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.shape != B.shape:
raise ValueError(f'cannot add {A} and {B}: shape mismatch')
self.args = (A, B)
super().__init__(_get_dtype([A, B]), A.shape)
def _matvec(self, x):
return self.args[0].matvec(x) + self.args[1].matvec(x)
def _rmatvec(self, x):
return self.args[0].rmatvec(x) + self.args[1].rmatvec(x)
def _rmatmat(self, x):
return self.args[0].rmatmat(x) + self.args[1].rmatmat(x)
def _matmat(self, x):
return self.args[0].matmat(x) + self.args[1].matmat(x)
def _adjoint(self):
A, B = self.args
return A.H + B.H
class _ProductLinearOperator(LinearOperator):
def __init__(self, A, B):
if not isinstance(A, LinearOperator) or \
not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.shape[1] != B.shape[0]:
raise ValueError(f'cannot multiply {A} and {B}: shape mismatch')
super().__init__(_get_dtype([A, B]),
(A.shape[0], B.shape[1]))
self.args = (A, B)
def _matvec(self, x):
return self.args[0].matvec(self.args[1].matvec(x))
def _rmatvec(self, x):
return self.args[1].rmatvec(self.args[0].rmatvec(x))
def _rmatmat(self, x):
return self.args[1].rmatmat(self.args[0].rmatmat(x))
def _matmat(self, x):
return self.args[0].matmat(self.args[1].matmat(x))
def _adjoint(self):
A, B = self.args
return B.H @ A.H
class _ScaledLinearOperator(LinearOperator):
def __init__(self, A, alpha):
if not isinstance(A, LinearOperator):
raise ValueError('LinearOperator expected as A')
if not np.isscalar(alpha):
raise ValueError('scalar expected as alpha')
if isinstance(A, _ScaledLinearOperator):
A, alpha_original = A.args
# Avoid in-place multiplication so that we don't accidentally mutate
# the original prefactor.
alpha = alpha * alpha_original
dtype = _get_dtype([A], [type(alpha)])
super().__init__(dtype, A.shape)
self.args = (A, alpha)
# Note: args[1] is alpha (a scalar), so use `*` below, not `@`
def _matvec(self, x):
return self.args[1] * self.args[0].matvec(x)
def _rmatvec(self, x):
return np.conj(self.args[1]) * self.args[0].rmatvec(x)
def _rmatmat(self, x):
return np.conj(self.args[1]) * self.args[0].rmatmat(x)
def _matmat(self, x):
return self.args[1] * self.args[0].matmat(x)
def _adjoint(self):
A, alpha = self.args
return A.H * np.conj(alpha)
class _PowerLinearOperator(LinearOperator):
def __init__(self, A, p):
if not isinstance(A, LinearOperator):
raise ValueError('LinearOperator expected as A')
if A.shape[0] != A.shape[1]:
raise ValueError(f'square LinearOperator expected, got {A!r}')
if not isintlike(p) or p < 0:
raise ValueError('non-negative integer expected as p')
super().__init__(_get_dtype([A]), A.shape)
self.args = (A, p)
def _power(self, fun, x):
res = np.array(x, copy=True)
for i in range(self.args[1]):
res = fun(res)
return res
def _matvec(self, x):
return self._power(self.args[0].matvec, x)
def _rmatvec(self, x):
return self._power(self.args[0].rmatvec, x)
def _rmatmat(self, x):
return self._power(self.args[0].rmatmat, x)
def _matmat(self, x):
return self._power(self.args[0].matmat, x)
def _adjoint(self):
A, p = self.args
return A.H ** p
class MatrixLinearOperator(LinearOperator):
def __init__(self, A):
super().__init__(A.dtype, A.shape)
self.A = A
self.__adj = None
self.args = (A,)
def _matmat(self, X):
return self.A.dot(X)
def _adjoint(self):
if self.__adj is None:
self.__adj = _AdjointMatrixOperator(self.A)
return self.__adj
class _AdjointMatrixOperator(MatrixLinearOperator):
def __init__(self, adjoint_array):
self.A = adjoint_array.T.conj()
self.args = (adjoint_array,)
self.shape = adjoint_array.shape[1], adjoint_array.shape[0]
@property
def dtype(self):
return self.args[0].dtype
def _adjoint(self):
return MatrixLinearOperator(self.args[0])
class IdentityOperator(LinearOperator):
def __init__(self, shape, dtype=None):
super().__init__(dtype, shape)
def _matvec(self, x):
return x
def _rmatvec(self, x):
return x
def _rmatmat(self, x):
return x
def _matmat(self, x):
return x
def _adjoint(self):
return self
def aslinearoperator(A):
"""Return A as a LinearOperator.
'A' may be any of the following types:
- ndarray
- matrix
- sparse array (e.g. csr_array, lil_array, etc.)
- LinearOperator
- An object with .shape and .matvec attributes
See the LinearOperator documentation for additional information.
Notes
-----
If 'A' has no .dtype attribute, the data type is determined by calling
:func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this
call upon the linear operator creation.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import aslinearoperator
>>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32)
>>> aslinearoperator(M)
<2x3 MatrixLinearOperator with dtype=int32>
"""
if isinstance(A, LinearOperator):
return A
elif isinstance(A, np.ndarray) or isinstance(A, np.matrix):
if A.ndim > 2:
raise ValueError('array must have ndim <= 2')
A = np.atleast_2d(np.asarray(A))
return MatrixLinearOperator(A)
elif issparse(A) or is_pydata_spmatrix(A):
return MatrixLinearOperator(A)
else:
if hasattr(A, 'shape') and hasattr(A, 'matvec'):
rmatvec = None
rmatmat = None
dtype = None
if hasattr(A, 'rmatvec'):
rmatvec = A.rmatvec
if hasattr(A, 'rmatmat'):
rmatmat = A.rmatmat
if hasattr(A, 'dtype'):
dtype = A.dtype
return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec,
rmatmat=rmatmat, dtype=dtype)
else:
raise TypeError('type not understood')

@@ -0,0 +1,20 @@
"Iterative Solvers for Sparse Linear Systems"
#from info import __doc__
from .iterative import *
from .minres import minres
from .lgmres import lgmres
from .lsqr import lsqr
from .lsmr import lsmr
from ._gcrotmk import gcrotmk
from .tfqmr import tfqmr
__all__ = [
'bicg', 'bicgstab', 'cg', 'cgs', 'gcrotmk', 'gmres',
'lgmres', 'lsmr', 'lsqr',
'minres', 'qmr', 'tfqmr'
]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester

@@ -0,0 +1,503 @@
# Copyright (C) 2015, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as SciPy.
import numpy as np
from numpy.linalg import LinAlgError
from scipy.linalg import (get_blas_funcs, qr, solve, svd, qr_insert, lstsq)
from .iterative import _get_atol_rtol
from scipy.sparse.linalg._isolve.utils import make_system
__all__ = ['gcrotmk']
def _fgmres(matvec, v0, m, atol, lpsolve=None, rpsolve=None, cs=(), outer_v=(),
prepend_outer_v=False):
"""
FGMRES Arnoldi process, with optional projection or augmentation
Parameters
----------
matvec : callable
Operation A*x
v0 : ndarray
Initial vector, normalized to nrm2(v0) == 1
m : int
Number of GMRES rounds
atol : float
Absolute tolerance for early exit
lpsolve : callable
Left preconditioner L
rpsolve : callable
Right preconditioner R
cs : list of (ndarray, ndarray)
Columns of matrices C and U in GCROT
outer_v : list of ndarrays
Augmentation vectors in LGMRES
prepend_outer_v : bool, optional
Whether augmentation vectors come before or after
Krylov iterates
Raises
------
LinAlgError
If nans encountered
Returns
-------
Q, R : ndarray
QR decomposition of the upper Hessenberg H=QR
B : ndarray
Projections corresponding to matrix C
vs : list of ndarray
Columns of matrix V
zs : list of ndarray
Columns of matrix Z
y : ndarray
Solution to ||H y - e_1||_2 = min!
res : float
The final (preconditioned) residual norm
"""
if lpsolve is None:
def lpsolve(x):
return x
if rpsolve is None:
def rpsolve(x):
return x
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (v0,))
vs = [v0]
zs = []
y = None
res = np.nan
m = m + len(outer_v)
# Orthogonal projection coefficients
B = np.zeros((len(cs), m), dtype=v0.dtype)
# H is stored in QR factorized form
Q = np.ones((1, 1), dtype=v0.dtype)
R = np.zeros((1, 0), dtype=v0.dtype)
eps = np.finfo(v0.dtype).eps
breakdown = False
# FGMRES Arnoldi process
for j in range(m):
# L A Z = C B + V H
if prepend_outer_v and j < len(outer_v):
z, w = outer_v[j]
elif prepend_outer_v and j == len(outer_v):
z = rpsolve(v0)
w = None
elif not prepend_outer_v and j >= m - len(outer_v):
z, w = outer_v[j - (m - len(outer_v))]
else:
z = rpsolve(vs[-1])
w = None
if w is None:
w = lpsolve(matvec(z))
else:
# w is clobbered below
w = w.copy()
w_norm = nrm2(w)
# GCROT projection: L A -> (1 - C C^H) L A
# i.e. orthogonalize against C
for i, c in enumerate(cs):
alpha = dot(c, w)
B[i,j] = alpha
w = axpy(c, w, c.shape[0], -alpha) # w -= alpha*c
# Orthogonalize against V
hcur = np.zeros(j+2, dtype=Q.dtype)
for i, v in enumerate(vs):
alpha = dot(v, w)
hcur[i] = alpha
w = axpy(v, w, v.shape[0], -alpha) # w -= alpha*v
hcur[i+1] = nrm2(w)
with np.errstate(over='ignore', divide='ignore'):
# Careful with denormals
alpha = 1/hcur[-1]
if np.isfinite(alpha):
w = scal(alpha, w)
if not (hcur[-1] > eps * w_norm):
# w essentially in the span of previous vectors,
# or we have nans. Bail out after updating the QR
# solution.
breakdown = True
vs.append(w)
zs.append(z)
# Arnoldi LSQ problem
# Add new column to H=Q@R, padding other columns with zeros
Q2 = np.zeros((j+2, j+2), dtype=Q.dtype, order='F')
Q2[:j+1,:j+1] = Q
Q2[j+1,j+1] = 1
R2 = np.zeros((j+2, j), dtype=R.dtype, order='F')
R2[:j+1,:] = R
Q, R = qr_insert(Q2, R2, hcur, j, which='col',
overwrite_qru=True, check_finite=False)
# Transformed least squares problem
# || Q R y - inner_res_0 * e_1 ||_2 = min!
# Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0]
# Residual is immediately known
res = abs(Q[0,-1])
# Check for termination
if res < atol or breakdown:
break
if not np.isfinite(R[j,j]):
# nans encountered, bail out
raise LinAlgError()
# -- Get the LSQ problem solution
# The problem is triangular, but the condition number may be
# bad (or in case of breakdown the last diagonal entry may be
# zero), so use lstsq instead of trtrs.
y, _, _, _ = lstsq(R[:j+1,:j+1], Q[0,:j+1].conj())
B = B[:,:j+1]
return Q, R, B, vs, zs, y, res
def gcrotmk(A, b, x0=None, *, rtol=1e-5, atol=0., maxiter=1000, M=None, callback=None,
m=20, k=None, CU=None, discard_C=False, truncate='oldest'):
"""
Solve ``Ax = b`` with the flexible GCROT(m,k) algorithm.
Parameters
----------
A : {sparse array, ndarray, LinearOperator}
The real or complex N-by-N matrix of the linear system.
Alternatively, `A` can be a linear operator which can
produce ``Ax`` using, e.g.,
`LinearOperator`.
b : ndarray
Right hand side of the linear system. Has shape (N,) or (N,1).
x0 : ndarray
Starting guess for the solution.
rtol, atol : float, optional
Parameters for the convergence test. For convergence,
``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
The default is ``rtol=1e-5`` and ``atol=0.0``.
maxiter : int, optional
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved. The
default is ``1000``.
M : {sparse array, ndarray, LinearOperator}, optional
Preconditioner for `A`. The preconditioner should approximate the
inverse of `A`. gcrotmk is a 'flexible' algorithm and the preconditioner
can vary from iteration to iteration. Effective preconditioning
dramatically improves the rate of convergence, which implies that
fewer iterations are needed to reach a given error tolerance.
callback : function, optional
User-supplied function to call after each iteration. It is called
as ``callback(xk)``, where ``xk`` is the current solution vector.
m : int, optional
Number of inner FGMRES iterations per each outer iteration.
Default: 20
k : int, optional
Number of vectors to carry between inner FGMRES iterations.
According to [2]_, good values are around `m`.
Default: `m`
CU : list of tuples, optional
List of tuples ``(c, u)`` which contain the columns of the matrices
C and U in the GCROT(m,k) algorithm. For details, see [2]_.
The list given and vectors contained in it are modified in-place.
If not given, start from empty matrices. The ``c`` elements in the
tuples can be ``None``, in which case the vectors are recomputed
via ``c = A u`` on start and orthogonalized as described in [3]_.
discard_C : bool, optional
Discard the C-vectors at the end. Useful if recycling Krylov subspaces
for different linear systems.
truncate : {'oldest', 'smallest'}, optional
Truncation scheme to use. Drop: oldest vectors, or vectors with
smallest singular values using the scheme discussed in [1,2].
See [2]_ for detailed comparison.
Default: 'oldest'
Returns
-------
x : ndarray
The solution found.
info : int
Provides convergence information:
* 0 : successful exit
* >0 : convergence to tolerance not achieved, number of iterations
References
----------
.. [1] E. de Sturler, ''Truncation strategies for optimal Krylov subspace
methods'', SIAM J. Numer. Anal. 36, 864 (1999).
.. [2] J.E. Hicken and D.W. Zingg, ''A simplified and flexible variant
of GCROT for solving nonsymmetric linear systems'',
SIAM J. Sci. Comput. 32, 172 (2010).
.. [3] M.L. Parks, E. de Sturler, G. Mackey, D.D. Johnson, S. Maiti,
''Recycling Krylov subspaces for sequences of linear systems'',
SIAM J. Sci. Comput. 28, 1651 (2006).
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import gcrotmk
>>> R = np.random.randn(5, 5)
>>> A = csc_array(R)
>>> b = np.random.randn(5)
>>> x, exit_code = gcrotmk(A, b, atol=1e-5)
>>> print(exit_code)
0
>>> np.allclose(A.dot(x), b)
True
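
The ``CU`` list can recycle the Krylov subspace between related solves
(a sketch; whether this pays off depends on the problem):

>>> CU = []
>>> x1, _ = gcrotmk(A, b, CU=CU, discard_C=True)
>>> x2, _ = gcrotmk(A, b, CU=CU, discard_C=True)  # restarts from CU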
"""
A, M, x, b = make_system(A, M, x0, b)
if not np.isfinite(b).all():
raise ValueError("RHS must contain only finite numbers")
if truncate not in ('oldest', 'smallest'):
raise ValueError(f"Invalid value for 'truncate': {truncate!r}")
matvec = A.matvec
psolve = M.matvec
if CU is None:
CU = []
if k is None:
k = m
axpy, dot, scal = None, None, None
if x0 is None:
r = b.copy()
else:
r = b - matvec(x)
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (x, r))
b_norm = nrm2(b)
# we call this to get the right atol/rtol and raise errors as necessary
atol, rtol = _get_atol_rtol('gcrotmk', b_norm, atol, rtol)
if b_norm == 0:
x = b
return (x, 0)
if discard_C:
CU[:] = [(None, u) for c, u in CU]
# Reorthogonalize old vectors
if CU:
# Sort already existing vectors to the front
CU.sort(key=lambda cu: cu[0] is not None)
# Fill-in missing ones
C = np.empty((A.shape[0], len(CU)), dtype=r.dtype, order='F')
us = []
j = 0
while CU:
# More memory-efficient: throw away old vectors as we go
c, u = CU.pop(0)
if c is None:
c = matvec(u)
C[:,j] = c
j += 1
us.append(u)
# Orthogonalize
Q, R, P = qr(C, overwrite_a=True, mode='economic', pivoting=True)
del C
# C := Q
cs = list(Q.T)
# U := U P R^-1, back-substitution
new_us = []
for j in range(len(cs)):
u = us[P[j]]
for i in range(j):
u = axpy(us[P[i]], u, u.shape[0], -R[i,j])
if abs(R[j,j]) < 1e-12 * abs(R[0,0]):
# discard rest of the vectors
break
u = scal(1.0/R[j,j], u)
new_us.append(u)
# Form the new CU lists
CU[:] = list(zip(cs, new_us))[::-1]
if CU:
axpy, dot = get_blas_funcs(['axpy', 'dot'], (r,))
# Solve first the projection operation with respect to the CU
# vectors. This corresponds to modifying the initial guess to
# be
#
# x' = x + U y
# y = argmin_y || b - A (x + U y) ||^2
#
# The solution is y = C^H (b - A x)
for c, u in CU:
yc = dot(c, r)
x = axpy(u, x, x.shape[0], yc)
r = axpy(c, r, r.shape[0], -yc)
# GCROT main iteration
for j_outer in range(maxiter):
# -- callback
if callback is not None:
callback(x)
beta = nrm2(r)
# -- check stopping condition
beta_tol = max(atol, rtol * b_norm)
if beta <= beta_tol and (j_outer > 0 or CU):
# recompute residual to avoid rounding error
r = b - matvec(x)
beta = nrm2(r)
if beta <= beta_tol:
j_outer = -1
break
ml = m + max(k - len(CU), 0)
cs = [c for c, u in CU]
try:
Q, R, B, vs, zs, y, pres = _fgmres(matvec,
r/beta,
ml,
rpsolve=psolve,
atol=max(atol, rtol*b_norm)/beta,
cs=cs)
y *= beta
except LinAlgError:
# Floating point over/underflow, non-finite result from
# matmul etc. -- report failure.
break
#
# At this point,
#
# [A U, A Z] = [C, V] G; G = [ I B ]
# [ 0 H ]
#
# where [C, V] has orthonormal columns, and r = beta v_0. Moreover,
#
# || b - A (x + Z y + U q) ||_2 = || r - C B y - V H y - C q ||_2 = min!
#
# from which y = argmin_y || beta e_1 - H y ||_2, and q = -B y
#
#
# GCROT(m,k) update
#
# Define new outer vectors
# ux := (Z - U B) y
ux = zs[0]*y[0]
for z, yc in zip(zs[1:], y[1:]):
ux = axpy(z, ux, ux.shape[0], yc) # ux += z*yc
by = B.dot(y)
for cu, byc in zip(CU, by):
c, u = cu
ux = axpy(u, ux, ux.shape[0], -byc) # ux -= u*byc
# cx := V H y
with np.errstate(invalid="ignore"):
hy = Q.dot(R.dot(y))
cx = vs[0] * hy[0]
for v, hyc in zip(vs[1:], hy[1:]):
cx = axpy(v, cx, cx.shape[0], hyc) # cx += v*hyc
# Normalize cx, maintaining cx = A ux
# This new cx is orthogonal to the previous C, by construction
try:
alpha = 1/nrm2(cx)
if not np.isfinite(alpha):
raise FloatingPointError()
except (FloatingPointError, ZeroDivisionError):
# Cannot update, so skip it
continue
cx = scal(alpha, cx)
ux = scal(alpha, ux)
# Update residual and solution
gamma = dot(cx, r)
r = axpy(cx, r, r.shape[0], -gamma) # r -= gamma*cx
x = axpy(ux, x, x.shape[0], gamma) # x += gamma*ux
# Truncate CU
if truncate == 'oldest':
while len(CU) >= k and CU:
del CU[0]
elif truncate == 'smallest':
if len(CU) >= k and CU:
# cf. [1,2]
D = solve(R[:-1,:].T, B.T).T
W, sigma, V = svd(D)
# C := C W[:,:k-1], U := U W[:,:k-1]
new_CU = []
for j, w in enumerate(W[:,:k-1].T):
c, u = CU[0]
c = c * w[0]
u = u * w[0]
for cup, wp in zip(CU[1:], w[1:]):
cp, up = cup
c = axpy(cp, c, c.shape[0], wp)
u = axpy(up, u, u.shape[0], wp)
# Reorthogonalize at the same time; not necessary
# in exact arithmetic, but floating point error
# tends to accumulate here
for cp, up in new_CU:
alpha = dot(cp, c)
c = axpy(cp, c, c.shape[0], -alpha)
u = axpy(up, u, u.shape[0], -alpha)
alpha = nrm2(c)
c = scal(1.0/alpha, c)
u = scal(1.0/alpha, u)
new_CU.append((c, u))
CU[:] = new_CU
# Add new vector to CU
CU.append((cx, ux))
# Include the solution vector to the span
CU.append((None, x.copy()))
if discard_C:
CU[:] = [(None, uz) for cz, uz in CU]
return x, j_outer + 1

File diff suppressed because it is too large

@@ -0,0 +1,230 @@
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as SciPy.
import numpy as np
from numpy.linalg import LinAlgError
from scipy.linalg import get_blas_funcs
from .iterative import _get_atol_rtol
from .utils import make_system
from ._gcrotmk import _fgmres
__all__ = ['lgmres']
def lgmres(A, b, x0=None, *, rtol=1e-5, atol=0., maxiter=1000, M=None, callback=None,
inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True,
prepend_outer_v=False):
"""
Solve ``Ax = b`` with the LGMRES algorithm.
The LGMRES algorithm [1]_ [2]_ is designed to avoid some problems
in the convergence in restarted GMRES, and often converges in fewer
iterations.
Parameters
----------
A : {sparse array, ndarray, LinearOperator}
The real or complex N-by-N matrix of the linear system.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : ndarray
Right hand side of the linear system. Has shape (N,) or (N,1).
x0 : ndarray
Starting guess for the solution.
rtol, atol : float, optional
Parameters for the convergence test. For convergence,
``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
The default is ``rtol=1e-5``, the default for ``atol`` is ``0.0``.
maxiter : int, optional
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse array, ndarray, LinearOperator}, optional
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function, optional
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
inner_m : int, optional
Number of inner GMRES iterations per each outer iteration.
outer_k : int, optional
Number of vectors to carry between inner GMRES iterations.
According to [1]_, good values are in the range of 1...3.
However, note that if you want to use the additional vectors to
accelerate solving multiple similar problems, larger values may
be beneficial.
outer_v : list of tuples, optional
List containing tuples ``(v, Av)`` of vectors and corresponding
matrix-vector products, used to augment the Krylov subspace, and
carried between inner GMRES iterations. The element ``Av`` can
be `None` if the matrix-vector product should be re-evaluated.
This parameter is modified in-place by `lgmres`, and can be used
to pass "guess" vectors in and out of the algorithm when solving
similar problems.
store_outer_Av : bool, optional
Whether LGMRES should store also A@v in addition to vectors `v`
in the `outer_v` list. Default is True.
prepend_outer_v : bool, optional
Whether to put outer_v augmentation vectors before Krylov iterates.
In standard LGMRES, prepend_outer_v=False.
Returns
-------
x : ndarray
The converged solution.
info : int
Provides convergence information:
- 0 : successful exit
- >0 : convergence to tolerance not achieved, number of iterations
- <0 : illegal input or breakdown
Notes
-----
The LGMRES algorithm [1]_ [2]_ is designed to avoid the
slowing of convergence in restarted GMRES, due to alternating
residual vectors. It typically outperforms GMRES(m) of comparable
memory requirements by some measure, or at least is not much worse.
Another advantage of this algorithm is that you can supply it with
'guess' vectors in the `outer_v` argument that augment the Krylov
subspace. If the solution lies close to the span of these vectors,
the algorithm converges faster. This can be useful if several very
similar matrices need to be inverted one after another, such as in
Newton-Krylov iteration where the Jacobian matrix often changes
little in the nonlinear steps.
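As an illustrative sketch (not part of the original interface
description), the same ``outer_v`` list can be threaded through
successive solves of similar systems:

>>> import numpy as np
>>> from scipy.sparse.linalg import lgmres
>>> A1 = np.diag([4., 5., 6.])
>>> A2 = A1 + 1e-3 * np.ones((3, 3))
>>> b = np.ones(3)
>>> outer_v = []  # filled by the first solve, reused by the second
>>> x1, info1 = lgmres(A1, b, outer_v=outer_v)
>>> x2, info2 = lgmres(A2, b, outer_v=outer_v)  # warm-started solve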
References
----------
.. [1] A.H. Baker and E.R. Jessup and T. Manteuffel, "A Technique for
Accelerating the Convergence of Restarted GMRES", SIAM J. Matrix
Anal. Appl. 26, 962 (2005).
.. [2] A.H. Baker, "On Improving the Performance of the Linear Solver
restarted GMRES", PhD thesis, University of Colorado (2003).
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import lgmres
>>> A = csc_array([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = lgmres(A, b, atol=1e-5)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
"""
A,M,x,b = make_system(A,M,x0,b)
if not np.isfinite(b).all():
raise ValueError("RHS must contain only finite numbers")
matvec = A.matvec
psolve = M.matvec
if outer_v is None:
outer_v = []
axpy, dot, scal = None, None, None
nrm2 = get_blas_funcs('nrm2', [b])
b_norm = nrm2(b)
# we call this to get the right atol/rtol and raise errors as necessary
atol, rtol = _get_atol_rtol('lgmres', b_norm, atol, rtol)
if b_norm == 0:
x = b
return (x, 0)
ptol_max_factor = 1.0
for k_outer in range(maxiter):
r_outer = matvec(x) - b
# -- callback
if callback is not None:
callback(x)
# -- determine input type routines
if axpy is None:
if np.iscomplexobj(r_outer) and not np.iscomplexobj(x):
x = x.astype(r_outer.dtype)
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'],
(x, r_outer))
# -- check stopping condition
r_norm = nrm2(r_outer)
if r_norm <= max(atol, rtol * b_norm):
break
# -- inner LGMRES iteration
v0 = -psolve(r_outer)
inner_res_0 = nrm2(v0)
if inner_res_0 == 0:
rnorm = nrm2(r_outer)
raise RuntimeError("Preconditioner returned a zero vector; "
f"|v| ~ {rnorm:.1g}, |M v| = 0")
v0 = scal(1.0/inner_res_0, v0)
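# Request enough inner accuracy to reach the outer target
# max(atol, rtol*b_norm), capped by the adaptive ptol_max_factor.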
ptol = min(ptol_max_factor, max(atol, rtol*b_norm)/r_norm)
try:
Q, R, B, vs, zs, y, pres = _fgmres(matvec,
v0,
inner_m,
lpsolve=psolve,
atol=ptol,
outer_v=outer_v,
prepend_outer_v=prepend_outer_v)
y *= inner_res_0
if not np.isfinite(y).all():
# Overflow etc. in computation. There's no way to
# recover from this, so we have to bail out.
raise LinAlgError()
except LinAlgError:
# Floating point over/underflow, non-finite result from
# matmul etc. -- report failure.
return x, k_outer + 1
# Inner loop tolerance control
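# (relax the cap when the inner solve missed its target; tighten it otherwise)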
if pres > ptol:
ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
else:
ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)
# -- GMRES terminated: eval solution
dx = zs[0]*y[0]
for w, yc in zip(zs[1:], y[1:]):
dx = axpy(w, dx, dx.shape[0], yc) # dx += w*yc
# -- Store LGMRES augmentation vectors
nx = nrm2(dx)
if nx > 0:
if store_outer_Av:
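# Recover A @ dx in the Krylov basis via the Arnoldi relation:
# A @ (Z @ y) = V @ (H @ y) = V @ (Q @ R @ y)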
q = Q.dot(R.dot(y))
ax = vs[0]*q[0]
for v, qc in zip(vs[1:], q[1:]):
ax = axpy(v, ax, ax.shape[0], qc)
outer_v.append((dx/nx, ax/nx))
else:
outer_v.append((dx/nx, None))
# -- Retain only a finite number of augmentation vectors
while len(outer_v) > outer_k:
del outer_v[0]
# -- Apply step
x += dx
else:
# didn't converge ...
return x, maxiter
return x, 0

View file

@ -0,0 +1,486 @@
"""
Copyright (C) 2010 David Fong and Michael Saunders
LSMR uses an iterative method.
07 Jun 2010: Documentation updated
03 Jun 2010: First release version in Python
David Chin-lung Fong clfong@stanford.edu
Institute for Computational and Mathematical Engineering
Stanford University
Michael Saunders saunders@stanford.edu
Systems Optimization Laboratory
Dept of MS&E, Stanford University.
"""
__all__ = ['lsmr']
from numpy import zeros, inf, atleast_1d, result_type
from numpy.linalg import norm
from math import sqrt
from scipy.sparse.linalg._interface import aslinearoperator
from scipy.sparse.linalg._isolve.lsqr import _sym_ortho
def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
maxiter=None, show=False, x0=None):
"""Iterative solver for least-squares problems.
lsmr solves the system of linear equations ``Ax = b``. If the system
is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
``A`` is a rectangular matrix of dimension m-by-n, where all cases are
allowed: m = n, m > n, or m < n. ``b`` is a vector of length m.
The matrix A may be dense or sparse (usually sparse).
Parameters
----------
A : {sparse array, ndarray, LinearOperator}
Matrix A in the linear system.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` and ``A^H x`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : array_like, shape (m,)
Vector ``b`` in the linear system.
damp : float
Damping factor for regularized least-squares. `lsmr` solves
the regularized least-squares problem::
min ||(b) - (  A   )x||
    ||(0)   (damp*I) ||_2
where damp is a scalar. If damp is None or 0, the system
is solved without regularization. Default is 0.
atol, btol : float, optional
Stopping tolerances. `lsmr` continues iterations until a
certain backward error estimate is smaller than some quantity
depending on atol and btol. Let ``r = b - Ax`` be the
residual vector for the current approximate solution ``x``.
If ``Ax = b`` seems to be consistent, `lsmr` terminates
when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
Otherwise, `lsmr` terminates when ``norm(A^H r) <=
atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (default),
the final ``norm(r)`` should be accurate to about 6
digits. (The final ``x`` will usually have fewer correct digits,
depending on ``cond(A)`` and the size of LAMBDA.) If `atol`
or `btol` is None, a default value of 1.0e-6 will be used.
Ideally, they should be estimates of the relative error in the
entries of ``A`` and ``b`` respectively. For example, if the entries
of ``A`` have 7 correct digits, set ``atol = 1e-7``. This prevents
the algorithm from doing unnecessary work beyond the
uncertainty of the input data.
conlim : float, optional
`lsmr` terminates if an estimate of ``cond(A)`` exceeds
`conlim`. For compatible systems ``Ax = b``, conlim could be
as large as 1.0e+12 (say). For least-squares problems,
`conlim` should be less than 1.0e+8. If `conlim` is None, the
default value is 1e+8. Maximum precision can be obtained by
setting ``atol = btol = conlim = 0``, but the number of
iterations may then be excessive. Default is 1e8.
maxiter : int, optional
`lsmr` terminates if the number of iterations reaches
`maxiter`. The default is ``maxiter = min(m, n)``. For
ill-conditioned systems, a larger value of `maxiter` may be
needed.
show : bool, optional
Print iterations logs if ``show=True``. Default is False.
x0 : array_like, shape (n,), optional
Initial guess of ``x``; if None, zeros are used. Default is None.
.. versionadded:: 1.0.0
Returns
-------
x : ndarray of float
Least-square solution returned.
istop : int
istop gives the reason for stopping::
istop = 0 means x=0 is a solution. If x0 was given, then x=x0 is a
solution.
= 1 means x is an approximate solution to A@x = b,
according to atol and btol.
= 2 means x approximately solves the least-squares problem
according to atol.
= 3 means COND(A) seems to be greater than CONLIM.
= 4 is the same as 1 with atol = btol = eps (machine
precision)
= 5 is the same as 2 with atol = eps.
= 6 is the same as 3 with CONLIM = 1/eps.
= 7 means ITN reached maxiter before the other stopping
conditions were satisfied.
itn : int
Number of iterations used.
normr : float
``norm(b-Ax)``
normar : float
``norm(A^H (b - Ax))``
norma : float
``norm(A)``
conda : float
Condition number of A.
normx : float
``norm(x)``
Notes
-----
.. versionadded:: 0.11.0
References
----------
.. [1] D. C.-L. Fong and M. A. Saunders,
"LSMR: An iterative algorithm for sparse least-squares problems",
SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
:arxiv:`1006.0758`
.. [2] LSMR Software, https://web.stanford.edu/group/SOL/software/lsmr/
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import lsmr
>>> A = csc_array([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
The first example has the trivial solution ``[0, 0]``
>>> b = np.array([0., 0., 0.], dtype=float)
>>> x, istop, itn, normr = lsmr(A, b)[:4]
>>> istop
0
>>> x
array([0., 0.])
The stopping code ``istop=0`` indicates that a vector of zeros was
found as a solution. The returned solution `x` indeed contains
``[0., 0.]``. The next example has a non-trivial solution:
>>> b = np.array([1., 0., -1.], dtype=float)
>>> x, istop, itn, normr = lsmr(A, b)[:4]
>>> istop
1
>>> x
array([ 1., -1.])
>>> itn
1
>>> normr
4.440892098500627e-16
As indicated by ``istop=1``, `lsmr` found a solution obeying the tolerance
limits. The given solution ``[1., -1.]`` obviously solves the equation. The
remaining return values include information about the number of iterations
(`itn=1`) and the norm of the residual, i.e., the remaining difference
between the left- and right-hand sides of the solved equation.
The final example demonstrates the behavior in the case where there is no
solution for the equation:
>>> b = np.array([1., 0.01, -1.], dtype=float)
>>> x, istop, itn, normr = lsmr(A, b)[:4]
>>> istop
2
>>> x
array([ 1.00333333, -0.99666667])
>>> A.dot(x)-b
array([ 0.00333333, -0.00333333, 0.00333333])
>>> normr
0.005773502691896255
`istop` indicates that the system is inconsistent and thus `x` is rather an
approximate solution to the corresponding least-squares problem. `normr`
contains the norm of the minimal residual that was found.
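As a further, illustrative sketch (reusing the arrays above), a nonzero
``damp`` pulls the regularized solution toward a smaller norm:

>>> x_reg = lsmr(A, b, damp=1.0)[0]
>>> bool(np.linalg.norm(x_reg) < np.linalg.norm(x))
True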
"""
A = aslinearoperator(A)
b = atleast_1d(b)
if b.ndim > 1:
b = b.squeeze()
msg = ('The exact solution is x = 0, or x = x0, if x0 was given ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
hdg1 = '   itn      x(1)       norm r    norm Ar'
hdg2 = ' compatible   LS      norm A   cond A'
pfreq = 20 # print frequency (for repeating the heading)
pcount = 0 # print counter
m, n = A.shape
# the smaller dimension, which bounds the number of singular values
minDim = min([m, n])
if maxiter is None:
maxiter = minDim
if x0 is None:
dtype = result_type(A, b, float)
else:
dtype = result_type(A, b, x0, float)
if show:
print(' ')
print('LSMR Least-squares solution of Ax = b\n')
print(f'The matrix A has {m} rows and {n} columns')
print(f'damp = {damp:20.14e}\n')
print(f'atol = {atol:8.2e} conlim = {conlim:8.2e}\n')
print(f'btol = {btol:8.2e} maxiter = {maxiter:8g}\n')
u = b
normb = norm(b)
if x0 is None:
x = zeros(n, dtype)
beta = normb.copy()
else:
x = atleast_1d(x0.copy())
u = u - A.matvec(x)
beta = norm(u)
if beta > 0:
u = (1 / beta) * u
v = A.rmatvec(u)
alpha = norm(v)
else:
v = zeros(n, dtype)
alpha = 0
if alpha > 0:
v = (1 / alpha) * v
# Initialize variables for 1st iteration.
itn = 0
zetabar = alpha * beta
alphabar = alpha
rho = 1
rhobar = 1
cbar = 1
sbar = 0
h = v.copy()
hbar = zeros(n, dtype)
# Initialize variables for estimation of ||r||.
betadd = beta
betad = 0
rhodold = 1
tautildeold = 0
thetatilde = 0
zeta = 0
d = 0
# Initialize variables for estimation of ||A|| and cond(A)
normA2 = alpha * alpha
maxrbar = 0
minrbar = 1e+100
normA = sqrt(normA2)
condA = 1
normx = 0
# Items for use in stopping rules, normb set earlier
istop = 0
ctol = 0
if conlim > 0:
ctol = 1 / conlim
normr = beta
# Reverse the order here from the original matlab code because
# there was an error on return when arnorm==0
normar = alpha * beta
if normar == 0:
if show:
print(msg[0])
return x, istop, itn, normr, normar, normA, condA, normx
if normb == 0:
x[()] = 0
return x, istop, itn, normr, normar, normA, condA, normx
if show:
print(' ')
print(hdg1, hdg2)
test1 = 1
test2 = alpha / beta
str1 = f'{itn:6g} {x[0]:12.5e}'
str2 = f' {normr:10.3e} {normar:10.3e}'
str3 = f' {test1:8.1e} {test2:8.1e}'
print(''.join([str1, str2, str3]))
# Main iteration loop.
while itn < maxiter:
itn = itn + 1
# Perform the next step of the bidiagonalization to obtain the
# next beta, u, alpha, v. These satisfy the relations
# beta*u = A@v - alpha*u,
# alpha*v = A'@u - beta*v.
u *= -alpha
u += A.matvec(v)
beta = norm(u)
if beta > 0:
u *= (1 / beta)
v *= -beta
v += A.rmatvec(u)
alpha = norm(v)
if alpha > 0:
v *= (1 / alpha)
# At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.
# Construct rotation Qhat_{k,2k+1}.
chat, shat, alphahat = _sym_ortho(alphabar, damp)
# Use a plane rotation (Q_i) to turn B_i to R_i
rhoold = rho
c, s, rho = _sym_ortho(alphahat, beta)
thetanew = s*alpha
alphabar = c*alpha
# Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar
rhobarold = rhobar
zetaold = zeta
thetabar = sbar * rho
rhotemp = cbar * rho
cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
zeta = cbar * zetabar
zetabar = - sbar * zetabar
# Update h, h_hat, x.
hbar *= - (thetabar * rho / (rhoold * rhobarold))
hbar += h
x += (zeta / (rho * rhobar)) * hbar
h *= - (thetanew / rho)
h += v
# Estimate of ||r||.
# Apply rotation Qhat_{k,2k+1}.
betaacute = chat * betadd
betacheck = -shat * betadd
# Apply rotation Q_{k,k+1}.
betahat = c * betaacute
betadd = -s * betaacute
# Apply rotation Qtilde_{k-1}.
# betad = betad_{k-1} here.
thetatildeold = thetatilde
ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
thetatilde = stildeold * rhobar
rhodold = ctildeold * rhobar
betad = - stildeold * betad + ctildeold * betahat
# betad = betad_k here.
# rhodold = rhod_k here.
tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
taud = (zeta - thetatilde * tautildeold) / rhodold
d = d + betacheck * betacheck
normr = sqrt(d + (betad - taud)**2 + betadd * betadd)
# Estimate ||A||.
normA2 = normA2 + beta * beta
normA = sqrt(normA2)
normA2 = normA2 + alpha * alpha
# Estimate cond(A).
maxrbar = max(maxrbar, rhobarold)
if itn > 1:
minrbar = min(minrbar, rhobarold)
condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)
# Test for convergence.
# Compute norms for convergence testing.
normar = abs(zetabar)
normx = norm(x)
# Now use these norms to estimate certain other quantities,
# some of which will be small near a solution.
test1 = normr / normb
if (normA * normr) != 0:
test2 = normar / (normA * normr)
else:
test2 = inf
test3 = 1 / condA
t1 = test1 / (1 + normA * normx / normb)
rtol = btol + atol * normA * normx / normb
# The following tests guard against extremely small values of
# atol, btol or ctol. (The user may have set any or all of
# the parameters atol, btol, conlim to 0.)
# The effect is equivalent to the normal tests using
# atol = eps, btol = eps, conlim = 1/eps.
if itn >= maxiter:
istop = 7
if 1 + test3 <= 1:
istop = 6
if 1 + test2 <= 1:
istop = 5
if 1 + t1 <= 1:
istop = 4
# Allow for tolerances set by the user.
if test3 <= ctol:
istop = 3
if test2 <= atol:
istop = 2
if test1 <= rtol:
istop = 1
# See if it is time to print something.
if show:
if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
(itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
(test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
(istop != 0):
if pcount >= pfreq:
pcount = 0
print(' ')
print(hdg1, hdg2)
pcount = pcount + 1
str1 = f'{itn:6g} {x[0]:12.5e}'
str2 = f' {normr:10.3e} {normar:10.3e}'
str3 = f' {test1:8.1e} {test2:8.1e}'
str4 = f' {normA:8.1e} {condA:8.1e}'
print(''.join([str1, str2, str3, str4]))
if istop > 0:
break
# Print the stopping condition.
if show:
print(' ')
print('LSMR finished')
print(msg[istop])
print(f'istop ={istop:8g} normr ={normr:8.1e}')
print(f' normA ={normA:8.1e} normAr ={normar:8.1e}')
print(f'itn ={itn:8g} condA ={condA:8.1e}')
print(f' normx ={normx:8.1e}')
print(str1, str2)
print(str3, str4)
return x, istop, itn, normr, normar, normA, condA, normx

View file

@ -0,0 +1,589 @@
"""Sparse Equations and Least Squares.
The original Fortran code was written by C. C. Paige and M. A. Saunders as
described in
C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear
equations and sparse least squares, TOMS 8(1), 43--71 (1982).
C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear
equations and least-squares problems, TOMS 8(2), 195--209 (1982).
It is licensed under the following BSD license:
Copyright (c) 2006, Systems Optimization Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Stanford University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The Fortran code was translated to Python for use in CVXOPT by Jeffery
Kline with contributions by Mridul Aanjaneya and Bob Myhill.
Adapted for SciPy by Stefan van der Walt.
"""
__all__ = ['lsqr']
import numpy as np
from math import sqrt
from scipy.sparse.linalg._interface import aslinearoperator
from scipy.sparse._sputils import convert_pydata_sparse_to_scipy
eps = np.finfo(np.float64).eps
def _sym_ortho(a, b):
"""
Stable implementation of Givens rotation.
Notes
-----
The routine 'SymOrtho' was added for numerical stability. This is
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
``1/eps`` in some important places (see, for example text following
"Compute the next plane rotation Qk" in minres.py).
References
----------
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
and Least-Squares Problems", Dissertation,
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
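As a quick, illustrative check, the returned ``c, s, r`` satisfy
``c*a + s*b == r`` with ``r`` the hypotenuse of ``a`` and ``b``:

>>> import numpy as np
>>> from scipy.sparse.linalg._isolve.lsqr import _sym_ortho
>>> c, s, r = _sym_ortho(3.0, 4.0)
>>> bool(np.isclose(r, 5.0) and np.isclose(c * 3.0 + s * 4.0, r))
True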
"""
if b == 0:
return np.sign(a), 0, abs(a)
elif a == 0:
return 0, np.sign(b), abs(b)
elif abs(b) > abs(a):
tau = a / b
s = np.sign(b) / sqrt(1 + tau * tau)
c = s * tau
r = b / s
else:
tau = b / a
c = np.sign(a) / sqrt(1+tau*tau)
s = c * tau
r = a / c
return c, s, r
def lsqr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
iter_lim=None, show=False, calc_var=False, x0=None):
"""Find the least-squares solution to a large, sparse, linear system
of equations.
The function solves ``Ax = b`` or ``min ||Ax - b||^2`` or
``min ||Ax - b||^2 + d^2 ||x - x0||^2``.
The matrix A may be square or rectangular (over-determined or
under-determined), and may have any rank.
::
1. Unsymmetric equations --    solve  Ax = b

2. Linear least squares  --    solve  Ax = b
                               in the least-squares sense

3. Damped least squares  --    solve (   A    )*x = (    b    )
                                     ( damp*I )     ( damp*x0 )
                               in the least-squares sense
Parameters
----------
A : {sparse array, ndarray, LinearOperator}
Representation of an m-by-n matrix.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` and ``A^T x`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : array_like, shape (m,)
Right-hand side vector ``b``.
damp : float
Damping coefficient. Default is 0.
atol, btol : float, optional
Stopping tolerances. `lsqr` continues iterations until a
certain backward error estimate is smaller than some quantity
depending on atol and btol. Let ``r = b - Ax`` be the
residual vector for the current approximate solution ``x``.
If ``Ax = b`` seems to be consistent, `lsqr` terminates
when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
Otherwise, `lsqr` terminates when ``norm(A^H r) <=
atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (default),
the final ``norm(r)`` should be accurate to about 6
digits. (The final ``x`` will usually have fewer correct digits,
depending on ``cond(A)`` and the size of LAMBDA.) If `atol`
or `btol` is None, a default value of 1.0e-6 will be used.
Ideally, they should be estimates of the relative error in the
entries of ``A`` and ``b`` respectively. For example, if the entries
of ``A`` have 7 correct digits, set ``atol = 1e-7``. This prevents
the algorithm from doing unnecessary work beyond the
uncertainty of the input data.
conlim : float, optional
Another stopping tolerance. lsqr terminates if an estimate of
``cond(A)`` exceeds `conlim`. For compatible systems ``Ax =
b``, `conlim` could be as large as 1.0e+12 (say). For
least-squares problems, conlim should be less than 1.0e+8.
Maximum precision can be obtained by setting ``atol = btol =
conlim = zero``, but the number of iterations may then be
excessive. Default is 1e8.
iter_lim : int, optional
Explicit limitation on number of iterations (for safety).
show : bool, optional
Display an iteration log. Default is False.
calc_var : bool, optional
Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.
x0 : array_like, shape (n,), optional
Initial guess of x; if None, zeros are used. Default is None.
.. versionadded:: 1.0.0
Returns
-------
x : ndarray of float
The final solution.
istop : int
Gives the reason for termination.
1 means x is an approximate solution to Ax = b.
2 means x approximately solves the least-squares problem.
itn : int
Iteration number upon termination.
r1norm : float
``norm(r)``, where ``r = b - Ax``.
r2norm : float
``sqrt( norm(r)^2 + damp^2 * norm(x - x0)^2 )``. Equal to `r1norm`
if ``damp == 0``.
anorm : float
Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
acond : float
Estimate of ``cond(Abar)``.
arnorm : float
Estimate of ``norm(A'@r - damp^2*(x - x0))``.
xnorm : float
``norm(x)``
var : ndarray of float
If ``calc_var`` is True, estimates all diagonals of
``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
damp^2*I)^{-1}``. This is well defined if A has full column
rank or ``damp > 0``. (Not sure what var means if ``rank(A)
< n`` and ``damp = 0.``)
Notes
-----
LSQR uses an iterative method to approximate the solution. The
number of iterations required to reach a certain accuracy depends
strongly on the scaling of the problem. Poor scaling of the rows
or columns of A should therefore be avoided where possible.
For example, in problem 1 the solution is unaltered by
row-scaling. If a row of A is very small or large compared to
the other rows of A, the corresponding row of ( A b ) should be
scaled up or down.
In problems 1 and 2, the solution x is easily recovered
following column-scaling. Unless better information is known,
the nonzero columns of A should be scaled so that they all have
the same Euclidean norm (e.g., 1.0).
In problem 3, there is no freedom to re-scale if damp is
nonzero. However, the value of damp should be assigned only
after attention has been paid to the scaling of A.
The parameter damp is intended to help regularize
ill-conditioned systems, by preventing the true solution from
being very large. Another aid to regularization is provided by
the parameter acond, which may be used to terminate iterations
before the computed solution becomes very large.
If some initial estimate ``x0`` is known and if ``damp == 0``,
one could proceed as follows:
1. Compute a residual vector ``r0 = b - A@x0``.
2. Use LSQR to solve the system ``A@dx = r0``.
3. Add the correction dx to obtain a final solution ``x = x0 + dx``.
This requires that ``x0`` be available before and after the call
to LSQR. To judge the benefits, suppose LSQR takes k1 iterations
to solve A@x = b and k2 iterations to solve A@dx = r0.
If x0 is "good", norm(r0) will be smaller than norm(b).
If the same stopping tolerances atol and btol are used for each
system, k1 and k2 will be similar, but the final solution x0 + dx
should be more accurate. The only way to reduce the total work
is to use a larger stopping tolerance for the second system.
If some value btol is suitable for A@x = b, the larger value
btol*norm(b)/norm(r0) should be suitable for A@dx = r0.
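A minimal sketch of this recipe (the arrays here are hypothetical, for
illustration only):

>>> import numpy as np
>>> from scipy.sparse.linalg import lsqr
>>> A = np.array([[1., 0.], [1., 1.], [0., 1.]])
>>> b = np.array([1., 0., -1.])
>>> x0 = np.array([0.9, -0.9])
>>> r0 = b - A @ x0      # step 1: residual of the initial guess
>>> dx = lsqr(A, r0)[0]  # step 2: solve A @ dx = r0
>>> x = x0 + dx          # step 3: corrected solution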
Preconditioning is another way to reduce the number of iterations.
If it is possible to solve a related system ``M@x = b``
efficiently, where M approximates A in some helpful way (e.g. M -
A has low rank or its elements are small relative to those of A),
LSQR may converge more rapidly on the system ``A@M(inverse)@z =
b``, after which x can be recovered by solving M@x = z.
If A is symmetric, LSQR should not be used!
Alternatives are the symmetric conjugate-gradient method (cg)
and/or SYMMLQ. SYMMLQ is an implementation of symmetric cg that
applies to any symmetric A and will converge more rapidly than
LSQR. If A is positive definite, there are other implementations
of symmetric cg that require slightly less work per iteration than
SYMMLQ (but will take the same number of iterations).
References
----------
.. [1] C. C. Paige and M. A. Saunders (1982a).
"LSQR: An algorithm for sparse linear equations and
sparse least squares", ACM TOMS 8(1), 43-71.
.. [2] C. C. Paige and M. A. Saunders (1982b).
"Algorithm 583. LSQR: Sparse linear equations and least
squares problems", ACM TOMS 8(2), 195-209.
.. [3] M. A. Saunders (1995). "Solution of sparse rectangular
systems using LSQR and CRAIG", BIT 35, 588-604.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import lsqr
>>> A = csc_array([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
The first example has the trivial solution ``[0, 0]``
>>> b = np.array([0., 0., 0.], dtype=float)
>>> x, istop, itn, normr = lsqr(A, b)[:4]
>>> istop
0
>>> x
array([ 0., 0.])
The stopping code ``istop=0`` indicates that a vector of zeros was
found as a solution. The returned solution `x` indeed contains
``[0., 0.]``. The next example has a non-trivial solution:
>>> b = np.array([1., 0., -1.], dtype=float)
>>> x, istop, itn, r1norm = lsqr(A, b)[:4]
>>> istop
1
>>> x
array([ 1., -1.])
>>> itn
1
>>> r1norm
4.440892098500627e-16
As indicated by ``istop=1``, `lsqr` found a solution obeying the tolerance
limits. The given solution ``[1., -1.]`` obviously solves the equation. The
remaining return values include information about the number of iterations
(`itn=1`) and the norm of the residual, i.e., the remaining difference
between the left- and right-hand sides of the solved equation.
The final example demonstrates the behavior in the case where there is no
solution for the equation:
>>> b = np.array([1., 0.01, -1.], dtype=float)
>>> x, istop, itn, r1norm = lsqr(A, b)[:4]
>>> istop
2
>>> x
array([ 1.00333333, -0.99666667])
>>> A.dot(x)-b
array([ 0.00333333, -0.00333333, 0.00333333])
>>> r1norm
0.005773502691896255
`istop` indicates that the system is inconsistent and thus `x` is rather an
approximate solution to the corresponding least-squares problem. `r1norm`
contains the norm of the minimal residual that was found.
"""
A = convert_pydata_sparse_to_scipy(A)
A = aslinearoperator(A)
b = np.atleast_1d(b)
if b.ndim > 1:
b = b.squeeze()
m, n = A.shape
if iter_lim is None:
iter_lim = 2 * n
var = np.zeros(n)
msg = ('The exact solution is x = 0 ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
if show:
print(' ')
print('LSQR Least-squares solution of Ax = b')
str1 = f'The matrix A has {m} rows and {n} columns'
str2 = f'damp = {damp:20.14e} calc_var = {calc_var:8g}'
str3 = f'atol = {atol:8.2e} conlim = {conlim:8.2e}'
str4 = f'btol = {btol:8.2e} iter_lim = {iter_lim:8g}'
print(str1)
print(str2)
print(str3)
print(str4)
itn = 0
istop = 0
ctol = 0
if conlim > 0:
ctol = 1/conlim
anorm = 0
acond = 0
dampsq = damp**2
ddnorm = 0
res2 = 0
xnorm = 0
xxnorm = 0
z = 0
cs2 = -1
sn2 = 0
# Set up the first vectors u and v for the bidiagonalization.
# These satisfy beta*u = b - A@x, alfa*v = A'@u.
u = b
bnorm = np.linalg.norm(b)
if x0 is None:
x = np.zeros(n)
beta = bnorm.copy()
else:
x = np.asarray(x0)
u = u - A.matvec(x)
beta = np.linalg.norm(u)
if beta > 0:
u = (1/beta) * u
v = A.rmatvec(u)
alfa = np.linalg.norm(v)
else:
v = x.copy()
alfa = 0
if alfa > 0:
v = (1/alfa) * v
w = v.copy()
rhobar = alfa
phibar = beta
rnorm = beta
r1norm = rnorm
r2norm = rnorm
# Reverse the order here from the original matlab code because
# there was an error on return when arnorm==0
arnorm = alfa * beta
if arnorm == 0:
if show:
print(msg[0])
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
head1 = '   Itn      x[0]       r1norm     r2norm '
head2 = ' Compatible    LS      Norm A   Cond A'
if show:
print(' ')
print(head1, head2)
test1 = 1
test2 = alfa / beta
str1 = f'{itn:6g} {x[0]:12.5e}'
str2 = f' {r1norm:10.3e} {r2norm:10.3e}'
str3 = f' {test1:8.1e} {test2:8.1e}'
print(str1, str2, str3)
# Main iteration loop.
while itn < iter_lim:
itn = itn + 1
# Perform the next step of the bidiagonalization to obtain the
# next beta, u, alfa, v. These satisfy the relations
# beta*u = a@v - alfa*u,
# alfa*v = A'@u - beta*v.
u = A.matvec(v) - alfa * u
beta = np.linalg.norm(u)
if beta > 0:
u = (1/beta) * u
anorm = sqrt(anorm**2 + alfa**2 + beta**2 + dampsq)
v = A.rmatvec(u) - beta * v
alfa = np.linalg.norm(v)
if alfa > 0:
v = (1 / alfa) * v
# Use a plane rotation to eliminate the damping parameter.
# This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
if damp > 0:
rhobar1 = sqrt(rhobar**2 + dampsq)
cs1 = rhobar / rhobar1
sn1 = damp / rhobar1
psi = sn1 * phibar
phibar = cs1 * phibar
else:
# cs1 = 1 and sn1 = 0
rhobar1 = rhobar
psi = 0.
# Use a plane rotation to eliminate the subdiagonal element (beta)
# of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
cs, sn, rho = _sym_ortho(rhobar1, beta)
theta = sn * alfa
rhobar = -cs * alfa
phi = cs * phibar
phibar = sn * phibar
tau = sn * phi
# Update x and w.
t1 = phi / rho
t2 = -theta / rho
dk = (1 / rho) * w
x = x + t1 * w
w = v + t2 * w
ddnorm = ddnorm + np.linalg.norm(dk)**2
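# When requested, accumulate dk**2 as an estimate of the diagonals of
# (A'A + damp^2*I)^{-1} (returned as `var`).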
if calc_var:
var = var + dk**2
# Use a plane rotation on the right to eliminate the
# super-diagonal element (theta) of the upper-bidiagonal matrix.
# Then use the result to estimate norm(x).
delta = sn2 * rho
gambar = -cs2 * rho
rhs = phi - delta * z
zbar = rhs / gambar
xnorm = sqrt(xxnorm + zbar**2)
gamma = sqrt(gambar**2 + theta**2)
cs2 = gambar / gamma
sn2 = theta / gamma
z = rhs / gamma
xxnorm = xxnorm + z**2
# Test for convergence.
# First, estimate the condition of the matrix Abar,
# and the norms of rbar and Abar'rbar.
acond = anorm * sqrt(ddnorm)
res1 = phibar**2
res2 = res2 + psi**2
rnorm = sqrt(res1 + res2)
arnorm = alfa * abs(tau)
# Distinguish between
# r1norm = ||b - Ax|| and
# r2norm = rnorm in current code
# = sqrt(r1norm^2 + damp^2*||x - x0||^2).
# Estimate r1norm from
# r1norm = sqrt(r2norm^2 - damp^2*||x - x0||^2).
# Although there is cancellation, it might be accurate enough.
if damp > 0:
r1sq = rnorm**2 - dampsq * xxnorm
r1norm = sqrt(abs(r1sq))
if r1sq < 0:
r1norm = -r1norm
else:
r1norm = rnorm
r2norm = rnorm
# Now use these norms to estimate certain other quantities,
# some of which will be small near a solution.
test1 = rnorm / bnorm
test2 = arnorm / (anorm * rnorm + eps)
test3 = 1 / (acond + eps)
t1 = test1 / (1 + anorm * xnorm / bnorm)
rtol = btol + atol * anorm * xnorm / bnorm
# The following tests guard against extremely small values of
# atol, btol or ctol. (The user may have set any or all of
# the parameters atol, btol, conlim to 0.)
# The effect is equivalent to the normal tests using
# atol = eps, btol = eps, conlim = 1/eps.
if itn >= iter_lim:
istop = 7
if 1 + test3 <= 1:
istop = 6
if 1 + test2 <= 1:
istop = 5
if 1 + t1 <= 1:
istop = 4
# Allow for tolerances set by the user.
if test3 <= ctol:
istop = 3
if test2 <= atol:
istop = 2
if test1 <= rtol:
istop = 1
if show:
# See if it is time to print something.
prnt = False
if n <= 40:
prnt = True
if itn <= 10:
prnt = True
if itn >= iter_lim-10:
prnt = True
# if itn%10 == 0: prnt = True
if test3 <= 2*ctol:
prnt = True
if test2 <= 10*atol:
prnt = True
if test1 <= 10*rtol:
prnt = True
if istop != 0:
prnt = True
if prnt:
str1 = f'{itn:6g} {x[0]:12.5e}'
str2 = f' {r1norm:10.3e} {r2norm:10.3e}'
str3 = f' {test1:8.1e} {test2:8.1e}'
str4 = f' {anorm:8.1e} {acond:8.1e}'
print(str1, str2, str3, str4)
if istop != 0:
break
# End of iteration loop.
# Print the stopping condition.
if show:
print(' ')
print('LSQR finished')
print(msg[istop])
print(' ')
str1 = f'istop ={istop:8g} r1norm ={r1norm:8.1e}'
str2 = f'anorm ={anorm:8.1e} arnorm ={arnorm:8.1e}'
str3 = f'itn ={itn:8g} r2norm ={r2norm:8.1e}'
str4 = f'acond ={acond:8.1e} xnorm ={xnorm:8.1e}'
print(str1 + ' ' + str2)
print(str3 + ' ' + str4)
print(' ')
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var

View file

@ -0,0 +1,372 @@
from numpy import inner, zeros, inf, finfo
from numpy.linalg import norm
from math import sqrt
from .utils import make_system
__all__ = ['minres']
def minres(A, b, x0=None, *, rtol=1e-5, shift=0.0, maxiter=None,
M=None, callback=None, show=False, check=False):
"""
Solve ``Ax = b`` with the MINimum RESidual method, for a symmetric `A`.
MINRES minimizes norm(Ax - b) for a real symmetric matrix A. Unlike
the Conjugate Gradient method, A can be indefinite or singular.
If ``shift != 0``, the method solves ``(A - shift*I)x = b``.
Parameters
----------
A : {sparse array, ndarray, LinearOperator}
The real symmetric N-by-N matrix of the linear system
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : ndarray
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : ndarray
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : ndarray
Starting guess for the solution.
shift : float
Value to apply to the system ``(A - shift * I)x = b``. Default is 0.
rtol : float
Tolerance to achieve. The algorithm terminates when the relative
residual is below ``rtol``.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse array, ndarray, LinearOperator}
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
show : bool
If ``True``, print out a summary and metrics related to the solution
during iterations. Default is ``False``.
check : bool
If ``True``, run additional input validation to check that `A` and
`M` (if specified) are symmetric. Default is ``False``.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import minres
>>> A = csc_array([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> A = A + A.T
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = minres(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
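A shifted system ``(A - shift*I)x = b`` can be solved the same way; a
minimal, illustrative sketch reusing the arrays above:

>>> xs, info = minres(A, b, shift=0.5)
>>> info
0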
References
----------
Solution of sparse indefinite systems of linear equations,
C. C. Paige and M. A. Saunders (1975),
SIAM J. Numer. Anal. 12(4), pp. 617-629.
https://web.stanford.edu/group/SOL/software/minres/
This file is a translation of the following MATLAB implementation:
https://web.stanford.edu/group/SOL/software/minres/minres-matlab.zip
"""
A, M, x, b = make_system(A, M, x0, b)
matvec = A.matvec
psolve = M.matvec
first = 'Enter minres. '
last = 'Exit minres. '
n = A.shape[0]
if maxiter is None:
maxiter = 5 * n
msg = [' beta2 = 0. If M = I, b and x are eigenvectors ', # -1
' beta1 = 0. The exact solution is x0 ', # 0
' A solution to Ax = b was found, given rtol ', # 1
' A least-squares solution was found, given rtol ', # 2
' Reasonable accuracy achieved, given eps ', # 3
' x has converged to an eigenvector ', # 4
' acond has exceeded 0.1/eps ', # 5
' The iteration limit was reached ', # 6
' A does not define a symmetric matrix ', # 7
' M does not define a symmetric matrix ', # 8
' M does not define a pos-def preconditioner '] # 9
if show:
print(first + 'Solution of symmetric Ax = b')
print(first + f'n = {n:3g} shift = {shift:23.14e}')
print(first + f'itnlim = {maxiter:3g} rtol = {rtol:11.2e}')
print()
istop = 0
itn = 0
Anorm = 0
Acond = 0
rnorm = 0
ynorm = 0
xtype = x.dtype
eps = finfo(xtype).eps
# Set up y and v for the first Lanczos vector v1.
# y = beta1 P' v1, where P = C**(-1).
# v is really P' v1.
if x0 is None:
r1 = b.copy()
else:
r1 = b - A@x
y = psolve(r1)
beta1 = inner(r1, y)
if beta1 < 0:
raise ValueError('indefinite preconditioner')
elif beta1 == 0:
return (x, 0)
bnorm = norm(b)
if bnorm == 0:
x = b
return (x, 0)
beta1 = sqrt(beta1)
if check:
# are these too strict?
# see if A is symmetric
w = matvec(y)
r2 = matvec(w)
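# For symmetric A, <A@y, A@y> equals <y, A@(A@y)>;
# their difference measures the asymmetry.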
s = inner(w,w)
t = inner(y,r2)
z = abs(s - t)
epsa = (s + eps) * eps**(1.0/3.0)
if z > epsa:
raise ValueError('non-symmetric matrix')
# see if M is symmetric
r2 = psolve(y)
s = inner(y,y)
t = inner(r1,r2)
z = abs(s - t)
epsa = (s + eps) * eps**(1.0/3.0)
if z > epsa:
raise ValueError('non-symmetric preconditioner')
# Initialize other quantities
oldb = 0
beta = beta1
dbar = 0
epsln = 0
qrnorm = beta1
phibar = beta1
rhs1 = beta1
rhs2 = 0
tnorm2 = 0
gmax = 0
gmin = finfo(xtype).max
cs = -1
sn = 0
w = zeros(n, dtype=xtype)
w2 = zeros(n, dtype=xtype)
r2 = r1
if show:
print()
print()
print('   Itn     x(1)     Compatible    LS       norm(A)  cond(A) gbar/|A|')
while itn < maxiter:
itn += 1
s = 1.0/beta
v = s*y
y = matvec(v)
y = y - shift * v
if itn >= 2:
y = y - (beta/oldb)*r1
alfa = inner(v,y)
y = y - (alfa/beta)*r2
r1 = r2
r2 = y
y = psolve(r2)
oldb = beta
beta = inner(r2,y)
if beta < 0:
raise ValueError('non-symmetric matrix')
beta = sqrt(beta)
tnorm2 += alfa**2 + oldb**2 + beta**2
if itn == 1:
if beta/beta1 <= 10*eps:
istop = -1 # Terminate later
# Apply previous rotation Qk-1 to get
# [deltak epslnk+1] = [cs sn][dbark 0 ]
# [gbar k dbar k+1] [sn -cs][alfak betak+1].
oldeps = epsln
delta = cs * dbar + sn * alfa # delta1 = 0 deltak
gbar = sn * dbar - cs * alfa # gbar 1 = alfa1 gbar k
epsln = sn * beta # epsln2 = 0 epslnk+1
dbar = - cs * beta # dbar 2 = beta2 dbar k+1
root = norm([gbar, dbar])
Arnorm = phibar * root
# Compute the next plane rotation Qk
gamma = norm([gbar, beta]) # gammak
gamma = max(gamma, eps)
cs = gbar / gamma # ck
sn = beta / gamma # sk
phi = cs * phibar # phik
phibar = sn * phibar # phibark+1
# Update x.
denom = 1.0/gamma
w1 = w2
w2 = w
w = (v - oldeps*w1 - delta*w2) * denom
x = x + phi*w
# Go round again.
gmax = max(gmax, gamma)
gmin = min(gmin, gamma)
z = rhs1 / gamma
rhs1 = rhs2 - delta*z
rhs2 = - epsln*z
# Estimate various norms and test for convergence.
Anorm = sqrt(tnorm2)
ynorm = norm(x)
epsa = Anorm * eps
epsx = Anorm * ynorm * eps
epsr = Anorm * ynorm * rtol
diag = gbar
if diag == 0:
diag = epsa
qrnorm = phibar
rnorm = qrnorm
if ynorm == 0 or Anorm == 0:
test1 = inf
else:
test1 = rnorm / (Anorm*ynorm) # ||r|| / (||A|| ||x||)
if Anorm == 0:
test2 = inf
else:
test2 = root / Anorm # ||Ar|| / (||A|| ||r||)
# Estimate cond(A).
# In this version we look at the diagonals of R in the
# factorization of the lower Hessenberg matrix, Q @ H = R,
# where H is the tridiagonal matrix from Lanczos with one
# extra row, beta(k+1) e_k^T.
Acond = gmax/gmin
# See if any of the stopping criteria are satisfied.
# In rare cases, istop is already -1 from above (Abar = const*I).
if istop == 0:
t1 = 1 + test1 # These tests work if rtol < eps
t2 = 1 + test2
if t2 <= 1:
istop = 2
if t1 <= 1:
istop = 1
if itn >= maxiter:
istop = 6
if Acond >= 0.1/eps:
istop = 4
if epsx >= beta1:
istop = 3
# if rnorm <= epsx : istop = 2
# if rnorm <= epsr : istop = 1
if test2 <= rtol:
istop = 2
if test1 <= rtol:
istop = 1
# See if it is time to print something.
prnt = False
if n <= 40:
prnt = True
if itn <= 10:
prnt = True
if itn >= maxiter-10:
prnt = True
if itn % 10 == 0:
prnt = True
if qrnorm <= 10*epsx:
prnt = True
if qrnorm <= 10*epsr:
prnt = True
if Acond <= 1e-2/eps:
prnt = True
if istop != 0:
prnt = True
if show and prnt:
str1 = f'{itn:6g} {x[0]:12.5e} {test1:10.3e}'
str2 = f' {test2:10.3e}'
str3 = f' {Anorm:8.1e} {Acond:8.1e} {gbar/Anorm:8.1e}'
print(str1 + str2 + str3)
if itn % 10 == 0:
print()
if callback is not None:
callback(x)
if istop != 0:
break # TODO check this
if show:
print()
print(last + f' istop = {istop:3g} itn ={itn:5g}')
print(last + f' Anorm = {Anorm:12.4e} Acond = {Acond:12.4e}')
print(last + f' rnorm = {rnorm:12.4e} ynorm = {ynorm:12.4e}')
print(last + f' Arnorm = {Arnorm:12.4e}')
print(last + msg[istop+1])
if istop == 6:
info = maxiter
else:
info = 0
return (x,info)

View file

@ -0,0 +1,183 @@
#!/usr/bin/env python
"""Tests for the linalg._isolve.gcrotmk module
"""
import threading
from numpy.testing import (assert_, assert_allclose, assert_equal,
suppress_warnings)
import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_array, eye_array, random_array
from scipy.sparse.linalg._interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg._isolve import gcrotmk, gmres
Am = csr_array(array([[-2,1,0,0,0,9],
[1,-2,1,0,5,0],
[0,1,-2,1,0,0],
[0,0,1,-2,1,0],
[0,3,0,1,-2,1],
[1,0,0,0,1,-2]]))
b = array([1,2,3,4,5,6])
count = threading.local()  # per-thread matvec counter, lazily set to [0]
niter = threading.local()  # per-thread callback counter, lazily set to [0]
def matvec(v):
if not hasattr(count, 'c'):
count.c = [0]
count.c[0] += 1
return Am@v
def cb(v):
if not hasattr(niter, 'n'):
niter.n = [0]
niter.n[0] += 1
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
def do_solve(**kw):
if not hasattr(niter, 'n'):
niter.n = [0]
if not hasattr(count, 'c'):
count.c = [0]
count.c[0] = 0
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), rtol=1e-14, **kw)
count_0 = count.c[0]
assert_(allclose(A@x0, b, rtol=1e-12, atol=1e-12), norm(A@x0-b))
return x0, count_0
class TestGCROTMK:
def test_preconditioner(self):
# Check that preconditioning works
pc = splu(Am.tocsc())
M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
x0, count_0 = do_solve()
niter.n[0] = 0
x1, count_1 = do_solve(M=M, callback=cb)
assert_equal(count_1, 3)
assert count_1 < count_0/2
assert allclose(x1, x0, rtol=1e-14)
assert niter.n[0] < 3
def test_arnoldi(self):
rng = np.random.default_rng(1)
A = eye_array(2000) + random_array((2000, 2000), density=5e-4, rng=rng)
b = rng.random(2000)
# The inner arnoldi should be equivalent to gmres
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=10, k=0, maxiter=1)
x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=10, maxiter=1)
assert_equal(flag0, 1)
assert_equal(flag1, 1)
assert np.linalg.norm(A.dot(x0) - b) > 1e-4
assert_allclose(x0, x1)
def test_cornercase(self):
np.random.seed(1234)
# Rounding error may prevent convergence with tol=0 --- ensure
# that the return values in this case are correct, and no
# exceptions are raised
for n in [3, 5, 10, 100]:
A = 2*eye_array(n)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
b = np.ones(n)
x, info = gcrotmk(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = gcrotmk(A, b, rtol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
b = np.random.rand(n)
x, info = gcrotmk(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = gcrotmk(A, b, rtol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
def test_nans(self):
A = eye_array(3, format='lil')
A[1,1] = np.nan
b = np.ones(3)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = gcrotmk(A, b, rtol=0, maxiter=10)
assert_equal(info, 1)
def test_truncate(self):
np.random.seed(1234)
A = np.random.rand(30, 30) + np.eye(30)
b = np.random.rand(30)
for truncate in ['oldest', 'smallest']:
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate,
rtol=1e-4, maxiter=200)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-3)
def test_CU(self):
for discard_C in (True, False):
# Check that C,U behave as expected
CU = []
x0, count_0 = do_solve(CU=CU, discard_C=discard_C)
assert_(len(CU) > 0)
assert_(len(CU) <= 6)
if discard_C:
for c, u in CU:
assert_(c is None)
# should converge immediately
x1, count_1 = do_solve(CU=CU, discard_C=discard_C)
if discard_C:
assert_equal(count_1, 2 + len(CU))
else:
assert_equal(count_1, 3)
assert_(count_1 <= count_0/2)
assert_allclose(x1, x0, atol=1e-14)
def test_denormals(self):
# Check that no warnings are emitted if the matrix contains
# numbers for which 1/x has no float representation, and that
# the solver behaves properly.
A = np.array([[1, 2], [3, 4]], dtype=float)
A *= 100 * np.nextafter(0, 1)
b = np.array([1, 1])
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = gcrotmk(A, b)
if info == 0:
assert_allclose(A.dot(xp), b)

View file

@ -0,0 +1,809 @@
""" Test functions for the sparse.linalg._isolve module
"""
import itertools
import platform
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from numpy import zeros, arange, array, ones, eye, iscomplexobj
from numpy.linalg import norm
from scipy.sparse import dia_array, csr_array, kronsum
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg._isolve import (bicg, bicgstab, cg, cgs,
gcrotmk, gmres, lgmres,
minres, qmr, tfqmr)
# TODO check that method preserve shape and type
# TODO test both preconditioner methods
# list of all solvers under test
_SOLVERS = [bicg, bicgstab, cg, cgs, gcrotmk, gmres, lgmres,
minres, qmr, tfqmr]
CB_TYPE_FILTER = ".*called without specifying `callback_type`.*"
# create parametrized fixture for easy reuse in tests
@pytest.fixture(params=_SOLVERS, scope="session")
def solver(request):
"""
Fixture for all solvers in scipy.sparse.linalg._isolve
"""
return request.param
class Case:
def __init__(self, name, A, b=None, skip=None, nonconvergence=None):
self.name = name
self.A = A
if b is None:
self.b = arange(A.shape[0], dtype=float)
else:
self.b = b
if skip is None:
self.skip = []
else:
self.skip = skip
if nonconvergence is None:
self.nonconvergence = []
else:
self.nonconvergence = nonconvergence
class SingleTest:
def __init__(self, A, b, solver, casename, convergence=True):
self.A = A
self.b = b
self.solver = solver
self.name = casename + '-' + solver.__name__
self.convergence = convergence
def __repr__(self):
return f"<{self.name}>"
class IterativeParams:
def __init__(self):
sym_solvers = [minres, cg]
posdef_solvers = [cg]
real_solvers = [minres]
# list of Cases
self.cases = []
# Symmetric and Positive Definite
N = 40
data = ones((3, N))
data[0, :] = 2
data[1, :] = -1
data[2, :] = -1
Poisson1D = dia_array((data, [0, -1, 1]), shape=(N, N)).tocsr()
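# (the classic tridiagonal [-1, 2, -1] finite-difference Laplacian)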
self.cases.append(Case("poisson1d", Poisson1D))
# note: minres fails for single precision
self.cases.append(Case("poisson1d-F", Poisson1D.astype('f'),
skip=[minres]))
# Symmetric and Negative Definite
self.cases.append(Case("neg-poisson1d", -Poisson1D,
skip=posdef_solvers))
# note: minres fails for single precision
self.cases.append(Case("neg-poisson1d-F", (-Poisson1D).astype('f'),
skip=posdef_solvers + [minres]))
# 2-dimensional Poisson equations
Poisson2D = kronsum(Poisson1D, Poisson1D)
# note: minres fails for the 2-D Poisson problem;
# this will be fixed in a future PR
self.cases.append(Case("poisson2d", Poisson2D, skip=[minres]))
# note: minres fails for single precision
self.cases.append(Case("poisson2d-F", Poisson2D.astype('f'),
skip=[minres]))
# Symmetric and Indefinite
data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]], dtype='d')
RandDiag = dia_array((data, [0]), shape=(10, 10)).tocsr()
self.cases.append(Case("rand-diag", RandDiag, skip=posdef_solvers))
self.cases.append(Case("rand-diag-F", RandDiag.astype('f'),
skip=posdef_solvers))
# Random real-valued
rng = np.random.RandomState(1234)
data = rng.rand(4, 4)
self.cases.append(Case("rand", data,
skip=posdef_solvers + sym_solvers))
self.cases.append(Case("rand-F", data.astype('f'),
skip=posdef_solvers + sym_solvers))
# Random symmetric real-valued
rng = np.random.RandomState(1234)
data = rng.rand(4, 4)
data = data + data.T
self.cases.append(Case("rand-sym", data, skip=posdef_solvers))
self.cases.append(Case("rand-sym-F", data.astype('f'),
skip=posdef_solvers))
# Random pos-def symmetric real
np.random.seed(1234)
data = np.random.rand(9, 9)
data = np.dot(data.conj(), data.T)
self.cases.append(Case("rand-sym-pd", data))
# note: minres fails for single precision
self.cases.append(Case("rand-sym-pd-F", data.astype('f'),
skip=[minres]))
# Random complex-valued
rng = np.random.RandomState(1234)
data = rng.rand(4, 4) + 1j * rng.rand(4, 4)
skip_cmplx = posdef_solvers + sym_solvers + real_solvers
self.cases.append(Case("rand-cmplx", data, skip=skip_cmplx))
self.cases.append(Case("rand-cmplx-F", data.astype('F'),
skip=skip_cmplx))
# Random hermitian complex-valued
rng = np.random.RandomState(1234)
data = rng.rand(4, 4) + 1j * rng.rand(4, 4)
data = data + data.T.conj()
self.cases.append(Case("rand-cmplx-herm", data,
skip=posdef_solvers + real_solvers))
self.cases.append(Case("rand-cmplx-herm-F", data.astype('F'),
skip=posdef_solvers + real_solvers))
# Random pos-def hermitian complex-valued
rng = np.random.RandomState(1234)
data = rng.rand(9, 9) + 1j * rng.rand(9, 9)
data = np.dot(data.conj(), data.T)
self.cases.append(Case("rand-cmplx-sym-pd", data, skip=real_solvers))
self.cases.append(Case("rand-cmplx-sym-pd-F", data.astype('F'),
skip=real_solvers))
# Non-symmetric and Positive Definite
#
# cgs, qmr, bicg and tfqmr fail to converge on this one
# -- algorithmic limitation apparently
data = ones((2, 10))
data[0, :] = 2
data[1, :] = -1
A = dia_array((data, [0, -1]), shape=(10, 10)).tocsr()
self.cases.append(Case("nonsymposdef", A,
skip=sym_solvers + [cgs, qmr, bicg, tfqmr]))
self.cases.append(Case("nonsymposdef-F", A.astype('F'),
skip=sym_solvers + [cgs, qmr, bicg, tfqmr]))
# Symmetric, non-pd, hitting cgs/bicg/bicgstab/qmr/tfqmr breakdown
A = np.array([[0, 0, 0, 0, 0, 1, -1, -0, -0, -0, -0],
[0, 0, 0, 0, 0, 2, -0, -1, -0, -0, -0],
[0, 0, 0, 0, 0, 2, -0, -0, -1, -0, -0],
[0, 0, 0, 0, 0, 2, -0, -0, -0, -1, -0],
[0, 0, 0, 0, 0, 1, -0, -0, -0, -0, -1],
[1, 2, 2, 2, 1, 0, -0, -0, -0, -0, -0],
[-1, 0, 0, 0, 0, 0, -1, -0, -0, -0, -0],
[0, -1, 0, 0, 0, 0, -0, -1, -0, -0, -0],
[0, 0, -1, 0, 0, 0, -0, -0, -1, -0, -0],
[0, 0, 0, -1, 0, 0, -0, -0, -0, -1, -0],
[0, 0, 0, 0, -1, 0, -0, -0, -0, -0, -1]], dtype=float)
b = np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], dtype=float)
assert (A == A.T).all()
self.cases.append(Case("sym-nonpd", A, b,
skip=posdef_solvers,
nonconvergence=[cgs, bicg, bicgstab, qmr, tfqmr]
)
)
def generate_tests(self):
# generate test cases with skips applied
tests = []
for case in self.cases:
for solver in _SOLVERS:
if (solver in case.skip):
continue
if solver in case.nonconvergence:
tests += [SingleTest(case.A, case.b, solver, case.name,
convergence=False)]
else:
tests += [SingleTest(case.A, case.b, solver, case.name)]
return tests
cases = IterativeParams().generate_tests()
@pytest.fixture(params=cases, ids=[x.name for x in cases], scope="module")
def case(request):
"""
Fixture for all cases in IterativeParams
"""
return request.param
@pytest.mark.thread_unsafe
def test_maxiter(case):
if not case.convergence:
pytest.skip("Solver - Breakdown case, see gh-8829")
A = case.A
rtol = 1e-12
b = case.b
x0 = 0 * b
residuals = []
def callback(x):
if x.ndim == 0:
residuals.append(norm(b - case.A * x))
else:
residuals.append(norm(b - case.A @ x))
if case.solver == gmres:
with pytest.warns(DeprecationWarning, match=CB_TYPE_FILTER):
x, info = case.solver(A, b, x0=x0, rtol=rtol, maxiter=1, callback=callback)
else:
x, info = case.solver(A, b, x0=x0, rtol=rtol, maxiter=1, callback=callback)
assert len(residuals) == 1
assert info == 1
def test_convergence(case):
A = case.A
if A.dtype.char in "dD":
rtol = 1e-8
else:
rtol = 1e-2
b = case.b
x0 = 0 * b
x, info = case.solver(A, b, x0=x0, rtol=rtol)
assert_array_equal(x0, 0 * b) # ensure that x0 is not overwritten
if case.convergence:
assert info == 0
assert norm(A @ x - b) <= norm(b) * rtol
else:
assert info != 0
assert norm(A @ x - b) <= norm(b)
def test_precond_dummy(case):
if not case.convergence:
pytest.skip("Solver - Breakdown case, see gh-8829")
rtol = 1e-8
def identity(b, which=None):
"""trivial preconditioner"""
return b
A = case.A
M, N = A.shape
# Ensure the diagonal elements of A are non-zero before calculating
# 1.0/A.diagonal()
diagOfA = A.diagonal()
if np.count_nonzero(diagOfA) == len(diagOfA):
dia_array(([1.0 / diagOfA], [0]), shape=(M, N))
b = case.b
x0 = 0 * b
precond = LinearOperator(A.shape, identity, rmatvec=identity)
if case.solver is qmr:
x, info = case.solver(A, b, M1=precond, M2=precond, x0=x0, rtol=rtol)
else:
x, info = case.solver(A, b, M=precond, x0=x0, rtol=rtol)
assert info == 0
assert norm(A @ x - b) <= norm(b) * rtol
A = aslinearoperator(A)
A.psolve = identity
A.rpsolve = identity
x, info = case.solver(A, b, x0=x0, rtol=rtol)
assert info == 0
assert norm(A @ x - b) <= norm(b) * rtol
# Specific test for poisson1d and poisson2d cases
@pytest.mark.fail_slow(10)
@pytest.mark.parametrize('case', [x for x in IterativeParams().cases
if x.name in ('poisson1d', 'poisson2d')],
ids=['poisson1d', 'poisson2d'])
def test_precond_inverse(case):
for solver in _SOLVERS:
if solver in case.skip or solver is qmr:
continue
rtol = 1e-8
def inverse(b, which=None):
"""inverse preconditioner"""
A = case.A
if not isinstance(A, np.ndarray):
A = A.toarray()
return np.linalg.solve(A, b)
def rinverse(b, which=None):
"""inverse preconditioner"""
A = case.A
if not isinstance(A, np.ndarray):
A = A.toarray()
return np.linalg.solve(A.T, b)
matvec_count = [0]
def matvec(b):
matvec_count[0] += 1
return case.A @ b
def rmatvec(b):
matvec_count[0] += 1
return case.A.T @ b
b = case.b
x0 = 0 * b
A = LinearOperator(case.A.shape, matvec, rmatvec=rmatvec)
precond = LinearOperator(case.A.shape, inverse, rmatvec=rinverse)
# Solve with preconditioner
matvec_count = [0]
x, info = solver(A, b, M=precond, x0=x0, rtol=rtol)
assert info == 0
assert norm(case.A @ x - b) <= norm(b) * rtol
# Solution should be nearly instant
assert matvec_count[0] <= 3
def test_atol(solver):
# TODO: minres / tfqmr. It didn't historically use absolute tolerances, so
# fixing it is less urgent.
if solver in (minres, tfqmr):
pytest.skip("TODO: Add atol to minres/tfqmr")
# Historically this was tested as below; all solvers pass, but gcrotmk is
# over-sensitive to the difference between the random.seed and default_rng
# random streams. Hence the tol lower bound was changed from -10 to -9.
# np.random.seed(1234)
# A = np.random.rand(10, 10)
# A = A @ A.T + 10 * np.eye(10)
# b = 1e3*np.random.rand(10)
rng = np.random.default_rng(168441431005389)
A = rng.uniform(size=[10, 10])
A = A @ A.T + 10*np.eye(10)
b = 1e3 * rng.uniform(size=10)
b_norm = np.linalg.norm(b)
tols = np.r_[0, np.logspace(-9, 2, 7), np.inf]
# Check effect of badly scaled preconditioners
M0 = rng.standard_normal(size=(10, 10))
M0 = M0 @ M0.T
Ms = [None, 1e-6 * M0, 1e6 * M0]
for M, rtol, atol in itertools.product(Ms, tols, tols):
if rtol == 0 and atol == 0:
continue
if solver is qmr:
if M is not None:
M = aslinearoperator(M)
M2 = aslinearoperator(np.eye(10))
else:
M2 = None
x, info = solver(A, b, M1=M, M2=M2, rtol=rtol, atol=atol)
else:
x, info = solver(A, b, M=M, rtol=rtol, atol=atol)
assert info == 0
residual = A @ x - b
err = np.linalg.norm(residual)
atol2 = rtol * b_norm
# Added 1.00025 fudge factor because of `err` exceeding `atol` just
# very slightly on s390x (see gh-17839)
assert err <= 1.00025 * max(atol, atol2)
def test_zero_rhs(solver):
rng = np.random.default_rng(1684414984100503)
A = rng.random(size=[10, 10])
A = A @ A.T + 10 * np.eye(10)
b = np.zeros(10)
tols = np.r_[np.logspace(-10, 2, 7)]
for tol in tols:
x, info = solver(A, b, rtol=tol)
assert info == 0
assert_allclose(x, 0., atol=1e-15)
x, info = solver(A, b, rtol=tol, x0=ones(10))
assert info == 0
assert_allclose(x, 0., atol=tol)
if solver is not minres:
x, info = solver(A, b, rtol=tol, atol=0, x0=ones(10))
if info == 0:
assert_allclose(x, 0)
x, info = solver(A, b, rtol=tol, atol=tol)
assert info == 0
assert_allclose(x, 0, atol=1e-300)
x, info = solver(A, b, rtol=tol, atol=0)
assert info == 0
assert_allclose(x, 0, atol=1e-300)
@pytest.mark.xfail(reason="see gh-18697")
def test_maxiter_worsening(solver):
if solver not in (gmres, lgmres, qmr):
# these were skipped from the very beginning, see gh-9201; gh-14160
pytest.skip("Solver breakdown case")
# Check error does not grow (boundlessly) with increasing maxiter.
# This can occur due to the solvers hitting close to breakdown,
# which they should detect and halt as necessary.
# cf. gh-9100
if (solver is lgmres and
            platform.machine() not in ['x86_64', 'x86', 'aarch64', 'arm64']):
# see gh-17839
pytest.xfail(reason="fails on at least ppc64le, ppc64 and riscv64")
# Singular matrix, rhs numerically not in range
A = np.array([[-0.1112795288033378, 0, 0, 0.16127952880333685],
[0, -0.13627952880333782 + 6.283185307179586j, 0, 0],
[0, 0, -0.13627952880333782 - 6.283185307179586j, 0],
[0.1112795288033368, 0j, 0j, -0.16127952880333785]])
v = np.ones(4)
best_error = np.inf
# Unable to match the Fortran code tolerance levels with this example
# Original tolerance values
# slack_tol = 7 if platform.machine() == 'aarch64' else 5
slack_tol = 9
for maxiter in range(1, 20):
x, info = solver(A, v, maxiter=maxiter, rtol=1e-8, atol=0)
if info == 0:
assert norm(A @ x - v) <= 1e-8 * norm(v)
error = np.linalg.norm(A @ x - v)
best_error = min(best_error, error)
# Check with slack
assert error <= slack_tol * best_error
def test_x0_working(solver):
# Easy problem
rng = np.random.default_rng(1685363802304750)
n = 10
A = rng.random(size=[n, n])
A = A @ A.T
b = rng.random(n)
x0 = rng.random(n)
if solver is minres:
kw = dict(rtol=1e-6)
else:
kw = dict(atol=0, rtol=1e-6)
x, info = solver(A, b, **kw)
assert info == 0
assert norm(A @ x - b) <= 1e-6 * norm(b)
x, info = solver(A, b, x0=x0, **kw)
assert info == 0
assert norm(A @ x - b) <= 4.5e-6*norm(b)
def test_x0_equals_Mb(case):
if (case.solver is bicgstab) and (case.name == 'nonsymposdef-bicgstab'):
pytest.skip("Solver fails due to numerical noise "
"on some architectures (see gh-15533).")
if case.solver is tfqmr:
pytest.skip("Solver does not support x0='Mb'")
A = case.A
b = case.b
x0 = 'Mb'
rtol = 1e-8
x, info = case.solver(A, b, x0=x0, rtol=rtol)
assert_array_equal(x0, 'Mb') # ensure that x0 is not overwritten
assert info == 0
assert norm(A @ x - b) <= rtol * norm(b)
@pytest.mark.parametrize('solver', _SOLVERS)
def test_x0_solves_problem_exactly(solver):
# See gh-19948
mat = np.eye(2)
rhs = np.array([-1., -1.])
sol, info = solver(mat, rhs, x0=rhs)
assert_allclose(sol, rhs)
assert info == 0
# Specific tfqmr test
@pytest.mark.thread_unsafe
@pytest.mark.parametrize('case', IterativeParams().cases)
def test_show(case, capsys):
def cb(x):
pass
x, info = tfqmr(case.A, case.b, callback=cb, show=True)
out, err = capsys.readouterr()
if case.name == "sym-nonpd":
# no logs for some reason
exp = ""
elif case.name in ("nonsymposdef", "nonsymposdef-F"):
# Asymmetric and Positive Definite
exp = "TFQMR: Linear solve not converged due to reach MAXIT iterations"
else: # all other cases
exp = "TFQMR: Linear solve converged due to reach TOL iterations"
assert out.startswith(exp)
assert err == ""
def test_positional_error(solver):
# from test_x0_working
rng = np.random.default_rng(1685363802304750)
n = 10
A = rng.random(size=[n, n])
A = A @ A.T
b = rng.random(n)
x0 = rng.random(n)
with pytest.raises(TypeError):
solver(A, b, x0, 1e-5)
@pytest.mark.parametrize("atol", ["legacy", None, -1])
def test_invalid_atol(solver, atol):
    if solver is minres:
pytest.skip("minres has no `atol` argument")
# from test_x0_working
rng = np.random.default_rng(1685363802304750)
n = 10
A = rng.random(size=[n, n])
A = A @ A.T
b = rng.random(n)
x0 = rng.random(n)
with pytest.raises(ValueError):
solver(A, b, x0, atol=atol)
class TestQMR:
@pytest.mark.filterwarnings('ignore::scipy.sparse.SparseEfficiencyWarning')
def test_leftright_precond(self):
"""Check that QMR works with left and right preconditioners"""
from scipy.sparse.linalg._dsolve import splu
from scipy.sparse.linalg._interface import LinearOperator
n = 100
dat = ones(n)
A = dia_array(([-2 * dat, 4 * dat, -dat], [-1, 0, 1]), shape=(n, n))
b = arange(n, dtype='d')
L = dia_array(([-dat / 2, dat], [-1, 0]), shape=(n, n))
U = dia_array(([4 * dat, -dat], [0, 1]), shape=(n, n))
L_solver = splu(L)
U_solver = splu(U)
def L_solve(b):
return L_solver.solve(b)
def U_solve(b):
return U_solver.solve(b)
def LT_solve(b):
return L_solver.solve(b, 'T')
def UT_solve(b):
return U_solver.solve(b, 'T')
M1 = LinearOperator((n, n), matvec=L_solve, rmatvec=LT_solve)
M2 = LinearOperator((n, n), matvec=U_solve, rmatvec=UT_solve)
rtol = 1e-8
x, info = qmr(A, b, rtol=rtol, maxiter=15, M1=M1, M2=M2)
assert info == 0
assert norm(A @ x - b) <= rtol * norm(b)
class TestGMRES:
def test_basic(self):
A = np.vander(np.arange(10) + 1)[:, ::-1]
b = np.zeros(10)
b[0] = 1
x_gm, err = gmres(A, b, restart=5, maxiter=1)
assert_allclose(x_gm[0], 0.359, rtol=1e-2)
@pytest.mark.filterwarnings(f"ignore:{CB_TYPE_FILTER}:DeprecationWarning")
def test_callback(self):
def store_residual(r, rvec):
rvec[rvec.nonzero()[0].max() + 1] = r
        # Define A, b
A = csr_array(array([[-2, 1, 0, 0, 0, 0],
[1, -2, 1, 0, 0, 0],
[0, 1, -2, 1, 0, 0],
[0, 0, 1, -2, 1, 0],
[0, 0, 0, 1, -2, 1],
[0, 0, 0, 0, 1, -2]]))
b = ones((A.shape[0],))
maxiter = 1
rvec = zeros(maxiter + 1)
rvec[0] = 1.0
def callback(r):
return store_residual(r, rvec)
x, flag = gmres(A, b, x0=zeros(A.shape[0]), rtol=1e-16,
maxiter=maxiter, callback=callback)
# Expected output from SciPy 1.0.0
assert_allclose(rvec, array([1.0, 0.81649658092772603]), rtol=1e-10)
# Test preconditioned callback
M = 1e-3 * np.eye(A.shape[0])
rvec = zeros(maxiter + 1)
rvec[0] = 1.0
x, flag = gmres(A, b, M=M, rtol=1e-16, maxiter=maxiter,
callback=callback)
# Expected output from SciPy 1.0.0
# (callback has preconditioned residual!)
assert_allclose(rvec, array([1.0, 1e-3 * 0.81649658092772603]),
rtol=1e-10)
def test_abi(self):
# Check we don't segfault on gmres with complex argument
A = eye(2)
b = ones(2)
r_x, r_info = gmres(A, b)
r_x = r_x.astype(complex)
x, info = gmres(A.astype(complex), b.astype(complex))
assert iscomplexobj(x)
assert_allclose(r_x, x)
assert r_info == info
@pytest.mark.fail_slow(10)
def test_atol_legacy(self):
A = eye(2)
b = ones(2)
x, info = gmres(A, b, rtol=1e-5)
assert np.linalg.norm(A @ x - b) <= 1e-5 * np.linalg.norm(b)
assert_allclose(x, b, atol=0, rtol=1e-8)
rndm = np.random.RandomState(12345)
A = rndm.rand(30, 30)
b = 1e-6 * ones(30)
x, info = gmres(A, b, rtol=1e-7, restart=20)
assert np.linalg.norm(A @ x - b) > 1e-7
A = eye(2)
b = 1e-10 * ones(2)
x, info = gmres(A, b, rtol=1e-8, atol=0)
assert np.linalg.norm(A @ x - b) <= 1e-8 * np.linalg.norm(b)
def test_defective_precond_breakdown(self):
# Breakdown due to defective preconditioner
M = np.eye(3)
M[2, 2] = 0
b = np.array([0, 1, 1])
x = np.array([1, 0, 0])
A = np.diag([2, 3, 4])
x, info = gmres(A, b, x0=x, M=M, rtol=1e-15, atol=0)
# Should not return nans, nor terminate with false success
assert not np.isnan(x).any()
if info == 0:
assert np.linalg.norm(A @ x - b) <= 1e-15 * np.linalg.norm(b)
# The solution should be OK outside null space of M
assert_allclose(M @ (A @ x), M @ b)
def test_defective_matrix_breakdown(self):
# Breakdown due to defective matrix
A = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]])
b = np.array([1, 0, 1])
rtol = 1e-8
x, info = gmres(A, b, rtol=rtol, atol=0)
# Should not return nans, nor terminate with false success
assert not np.isnan(x).any()
if info == 0:
assert np.linalg.norm(A @ x - b) <= rtol * np.linalg.norm(b)
# The solution should be OK outside null space of A
assert_allclose(A @ (A @ x), A @ b)
@pytest.mark.filterwarnings(f"ignore:{CB_TYPE_FILTER}:DeprecationWarning")
def test_callback_type(self):
# The legacy callback type changes meaning of 'maxiter'
np.random.seed(1)
A = np.random.rand(20, 20)
b = np.random.rand(20)
cb_count = [0]
def pr_norm_cb(r):
cb_count[0] += 1
assert isinstance(r, float)
def x_cb(x):
cb_count[0] += 1
assert isinstance(x, np.ndarray)
# 2 iterations is not enough to solve the problem
cb_count = [0]
x, info = gmres(A, b, rtol=1e-6, atol=0, callback=pr_norm_cb,
maxiter=2, restart=50)
assert info == 2
assert cb_count[0] == 2
# With `callback_type` specified, no warning should be raised
cb_count = [0]
x, info = gmres(A, b, rtol=1e-6, atol=0, callback=pr_norm_cb,
maxiter=2, restart=50, callback_type='legacy')
assert info == 2
assert cb_count[0] == 2
# 2 restart cycles is enough to solve the problem
cb_count = [0]
x, info = gmres(A, b, rtol=1e-6, atol=0, callback=pr_norm_cb,
maxiter=2, restart=50, callback_type='pr_norm')
assert info == 0
assert cb_count[0] > 2
# 2 restart cycles is enough to solve the problem
cb_count = [0]
x, info = gmres(A, b, rtol=1e-6, atol=0, callback=x_cb, maxiter=2,
restart=50, callback_type='x')
assert info == 0
assert cb_count[0] == 1
def test_callback_x_monotonic(self):
# Check that callback_type='x' gives monotonic norm decrease
rng = np.random.RandomState(1)
A = rng.rand(20, 20) + np.eye(20)
b = rng.rand(20)
prev_r = [np.inf]
count = [0]
def x_cb(x):
r = np.linalg.norm(A @ x - b)
assert r <= prev_r[0]
prev_r[0] = r
count[0] += 1
x, info = gmres(A, b, rtol=1e-6, atol=0, callback=x_cb, maxiter=20,
restart=10, callback_type='x')
assert info == 20
assert count[0] == 20

View file

@ -0,0 +1,225 @@
"""Tests for the linalg._isolve.lgmres module
"""
import threading
from numpy.testing import (assert_, assert_allclose, assert_equal,
suppress_warnings)
import pytest
from platform import python_implementation
import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_array, eye_array, random_array
from scipy.sparse.linalg._interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg._isolve import lgmres, gmres
Am = csr_array(array([[-2, 1, 0, 0, 0, 9],
[1, -2, 1, 0, 5, 0],
[0, 1, -2, 1, 0, 0],
[0, 0, 1, -2, 1, 0],
[0, 3, 0, 1, -2, 1],
[1, 0, 0, 0, 1, -2]]))
b = array([1, 2, 3, 4, 5, 6])
count = threading.local() # [0]
niter = threading.local() # [0]
def matvec(v):
if not hasattr(count, 'c'):
count.c = [0]
count.c[0] += 1
return Am@v
def cb(v):
if not hasattr(niter, 'n'):
niter.n = [0]
niter.n[0] += 1
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
def do_solve(**kw):
if not hasattr(niter, 'n'):
niter.n = [0]
if not hasattr(count, 'c'):
count.c = [0]
count.c[0] = 0
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag = lgmres(A, b, x0=zeros(A.shape[0]),
inner_m=6, rtol=1e-14, **kw)
count_0 = count.c[0]
assert_(allclose(A@x0, b, rtol=1e-12, atol=1e-12), norm(A@x0-b))
return x0, count_0
class TestLGMRES:
def test_preconditioner(self):
# Check that preconditioning works
pc = splu(Am.tocsc())
M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
x0, count_0 = do_solve()
niter.n[0] = 0
x1, count_1 = do_solve(M=M, callback=cb)
assert count_1 == 3
assert count_1 < count_0/2
assert allclose(x1, x0, rtol=1e-14)
assert niter.n[0] < 3
def test_outer_v(self):
# Check that the augmentation vectors behave as expected
outer_v = []
x0, count_0 = do_solve(outer_k=6, outer_v=outer_v)
assert_(len(outer_v) > 0)
assert_(len(outer_v) <= 6)
x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
prepend_outer_v=True)
assert_(count_1 == 2, count_1)
assert_(count_1 < count_0/2)
assert_(allclose(x1, x0, rtol=1e-14))
# ---
outer_v = []
x0, count_0 = do_solve(outer_k=6, outer_v=outer_v,
store_outer_Av=False)
assert_(array([v[1] is None for v in outer_v]).all())
assert_(len(outer_v) > 0)
assert_(len(outer_v) <= 6)
x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
prepend_outer_v=True)
assert_(count_1 == 3, count_1)
assert_(count_1 < count_0/2)
assert_(allclose(x1, x0, rtol=1e-14))
@pytest.mark.skipif(python_implementation() == 'PyPy',
reason="Fails on PyPy CI runs. See #9507")
def test_arnoldi(self):
rng = np.random.default_rng(123)
A = eye_array(2000) + random_array((2000, 2000), density=5e-4, rng=rng)
b = rng.random(2000)
# The inner arnoldi should be equivalent to gmres
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]), inner_m=10, maxiter=1)
x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=10, maxiter=1)
assert_equal(flag0, 1)
assert_equal(flag1, 1)
norm = np.linalg.norm(A.dot(x0) - b)
assert_(norm > 1e-4)
assert_allclose(x0, x1)
def test_cornercase(self):
rng = np.random.RandomState(1234)
# Rounding error may prevent convergence with tol=0 --- ensure
# that the return values in this case are correct, and no
# exceptions are raised
for n in [3, 5, 10, 100]:
A = 2*eye_array(n)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
b = np.ones(n)
x, info = lgmres(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = lgmres(A, b, rtol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
b = rng.rand(n)
x, info = lgmres(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = lgmres(A, b, rtol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
def test_nans(self):
A = eye_array(3, format='lil')
A[1, 1] = np.nan
b = np.ones(3)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = lgmres(A, b, rtol=0, maxiter=10)
assert_equal(info, 1)
def test_breakdown_with_outer_v(self):
A = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([1, 2])
x = np.linalg.solve(A, b)
v0 = np.array([1, 0])
# The inner iteration should converge to the correct solution,
# since it's in the outer vector list
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = lgmres(A, b, outer_v=[(v0, None), (x, None)], maxiter=1)
assert_allclose(xp, x, atol=1e-12)
def test_breakdown_underdetermined(self):
# Should find LSQ solution in the Krylov span in one inner
# iteration, despite solver breakdown from nilpotent A.
A = np.array([[0, 1, 1, 1],
[0, 0, 1, 1],
[0, 0, 0, 1],
[0, 0, 0, 0]], dtype=float)
bs = [
np.array([1, 1, 1, 1]),
np.array([1, 1, 1, 0]),
np.array([1, 1, 0, 0]),
np.array([1, 0, 0, 0]),
]
for b in bs:
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = lgmres(A, b, maxiter=1)
resp = np.linalg.norm(A.dot(xp) - b)
K = np.c_[b, A.dot(b), A.dot(A.dot(b)), A.dot(A.dot(A.dot(b)))]
y, _, _, _ = np.linalg.lstsq(A.dot(K), b, rcond=-1)
x = K.dot(y)
res = np.linalg.norm(A.dot(x) - b)
assert_allclose(resp, res, err_msg=repr(b))
def test_denormals(self):
# Check that no warnings are emitted if the matrix contains
# numbers for which 1/x has no float representation, and that
# the solver behaves properly.
A = np.array([[1, 2], [3, 4]], dtype=float)
A *= 100 * np.nextafter(0, 1)
b = np.array([1, 1])
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = lgmres(A, b)
if info == 0:
assert_allclose(A.dot(xp), b)

View file

@ -0,0 +1,185 @@
"""
Copyright (C) 2010 David Fong and Michael Saunders
Distributed under the same license as SciPy
Testing Code for LSMR.
03 Jun 2010: First version release with lsmr.py
David Chin-lung Fong clfong@stanford.edu
Institute for Computational and Mathematical Engineering
Stanford University
Michael Saunders saunders@stanford.edu
Systems Optimization Laboratory
Dept of MS&E, Stanford University.
"""
from numpy import array, arange, eye, zeros, ones, transpose, hstack
from numpy.linalg import norm
from numpy.testing import assert_allclose
import pytest
from scipy.sparse import coo_array
from scipy.sparse.linalg._interface import aslinearoperator
from scipy.sparse.linalg import lsmr
from .test_lsqr import G, b
class TestLSMR:
def setup_method(self):
self.n = 10
self.m = 10
def assertCompatibleSystem(self, A, xtrue):
Afun = aslinearoperator(A)
b = Afun.matvec(xtrue)
x = lsmr(A, b)[0]
assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
def testIdentityACase1(self):
A = eye(self.n)
xtrue = zeros((self.n, 1))
self.assertCompatibleSystem(A, xtrue)
def testIdentityACase2(self):
A = eye(self.n)
xtrue = ones((self.n,1))
self.assertCompatibleSystem(A, xtrue)
def testIdentityACase3(self):
A = eye(self.n)
xtrue = transpose(arange(self.n,0,-1))
self.assertCompatibleSystem(A, xtrue)
def testBidiagonalA(self):
A = lowerBidiagonalMatrix(20,self.n)
xtrue = transpose(arange(self.n,0,-1))
self.assertCompatibleSystem(A,xtrue)
def testScalarB(self):
A = array([[1.0, 2.0]])
b = 3.0
x = lsmr(A, b)[0]
assert norm(A.dot(x) - b) == pytest.approx(0)
def testComplexX(self):
A = eye(self.n)
xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
self.assertCompatibleSystem(A, xtrue)
def testComplexX0(self):
A = 4 * eye(self.n) + ones((self.n, self.n))
xtrue = transpose(arange(self.n, 0, -1))
b = aslinearoperator(A).matvec(xtrue)
x0 = zeros(self.n, dtype=complex)
x = lsmr(A, b, x0=x0)[0]
assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
def testComplexA(self):
A = 4 * eye(self.n) + 1j * ones((self.n, self.n))
xtrue = transpose(arange(self.n, 0, -1).astype(complex))
self.assertCompatibleSystem(A, xtrue)
def testComplexB(self):
A = 4 * eye(self.n) + ones((self.n, self.n))
xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
b = aslinearoperator(A).matvec(xtrue)
x = lsmr(A, b)[0]
assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)
def testColumnB(self):
A = eye(self.n)
b = ones((self.n, 1))
x = lsmr(A, b)[0]
assert norm(A.dot(x) - b.ravel()) == pytest.approx(0)
def testInitialization(self):
# Test that the default setting is not modified
x_ref, _, itn_ref, normr_ref, *_ = lsmr(G, b)
assert_allclose(norm(b - G@x_ref), normr_ref, atol=1e-6)
# Test passing zeros yields similar result
x0 = zeros(b.shape)
x = lsmr(G, b, x0=x0)[0]
assert_allclose(x, x_ref)
# Test warm-start with single iteration
x0 = lsmr(G, b, maxiter=1)[0]
x, _, itn, normr, *_ = lsmr(G, b, x0=x0)
assert_allclose(norm(b - G@x), normr, atol=1e-6)
        # NOTE(gh-12139): This doesn't always converge to the same value as
        # ref because the error estimates differ slightly when calculated
        # from zeros vs x0; as a result, only compare norm and itn (not x).
        # x generally converges 1 iteration faster because it started at x0.
        # itn == itn_ref means that lsmr(x0) took an extra iteration; see above.
        # -1 is technically possible but rare (1 in 100000), so it's more
        # likely to be an error elsewhere.
assert itn - itn_ref in (0, 1)
# If an extra iteration is performed normr may be 0, while normr_ref
# may be much larger.
assert normr < normr_ref * (1 + 1e-6)
class TestLSMRReturns:
def setup_method(self):
self.n = 10
self.A = lowerBidiagonalMatrix(20, self.n)
self.xtrue = transpose(arange(self.n, 0, -1))
self.Afun = aslinearoperator(self.A)
self.b = self.Afun.matvec(self.xtrue)
self.x0 = ones(self.n)
self.x00 = self.x0.copy()
self.returnValues = lsmr(self.A, self.b)
self.returnValuesX0 = lsmr(self.A, self.b, x0=self.x0)
def test_unchanged_x0(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValuesX0
assert_allclose(self.x00, self.x0)
def testNormr(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
assert norm(self.b - self.Afun.matvec(x)) == pytest.approx(normr)
def testNormar(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
assert (norm(self.Afun.rmatvec(self.b - self.Afun.matvec(x)))
== pytest.approx(normar))
def testNormx(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
assert norm(x) == pytest.approx(normx)
def lowerBidiagonalMatrix(m, n):
# This is a simple example for testing LSMR.
# It uses the leading m*n submatrix from
# A = [ 1
# 1 2
# 2 3
# 3 4
# ...
# n ]
# suitably padded by zeros.
#
# 04 Jun 2010: First version for distribution with lsmr.py
if m <= n:
row = hstack((arange(m, dtype=int),
arange(1, m, dtype=int)))
col = hstack((arange(m, dtype=int),
arange(m-1, dtype=int)))
data = hstack((arange(1, m+1, dtype=float),
arange(1,m, dtype=float)))
return coo_array((data, (row, col)), shape=(m,n))
else:
row = hstack((arange(n, dtype=int),
arange(1, n+1, dtype=int)))
col = hstack((arange(n, dtype=int),
arange(n, dtype=int)))
data = hstack((arange(1, n+1, dtype=float),
arange(1,n+1, dtype=float)))
return coo_array((data,(row, col)), shape=(m,n))
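# Illustrative sanity sketch (not a test): for m = n = 3 the m <= n branch
# above yields
#     lowerBidiagonalMatrix(3, 3).toarray()
#     array([[1., 0., 0.],
#            [1., 2., 0.],
#            [0., 2., 3.]])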

View file

@ -0,0 +1,120 @@
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal, assert_equal
import pytest
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg import lsqr
# Set up a test problem
n = 35
G = np.eye(n)
normal = np.random.normal
norm = np.linalg.norm
for jj in range(5):
gg = normal(size=n)
hh = gg * gg.T
G += (hh + hh.T) * 0.5
G += normal(size=n) * normal(size=n)
b = normal(size=n)
# tolerance for atol/btol keywords of lsqr()
tol = 2e-10
# tolerances for testing the results of the lsqr() call with assert_allclose
# These tolerances are a bit fragile - see discussion in gh-15301.
atol_test = 4e-10
rtol_test = 2e-8
show = False
maxit = None
def test_lsqr_basic():
b_copy = b.copy()
xo, *_ = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
assert_array_equal(b_copy, b)
svx = np.linalg.solve(G, b)
assert_allclose(xo, svx, atol=atol_test, rtol=rtol_test)
# Now the same but with damp > 0.
# This is equivalent to solving the extended system:
# ( G ) @ x = ( b )
# ( damp*I ) ( 0 )
damp = 1.5
xo, *_ = lsqr(
G, b, damp=damp, show=show, atol=tol, btol=tol, iter_lim=maxit)
Gext = np.r_[G, damp * np.eye(G.shape[1])]
bext = np.r_[b, np.zeros(G.shape[1])]
svx, *_ = np.linalg.lstsq(Gext, bext, rcond=None)
assert_allclose(xo, svx, atol=atol_test, rtol=rtol_test)
def test_gh_2466():
row = np.array([0, 0])
col = np.array([0, 1])
val = np.array([1, -1])
A = scipy.sparse.coo_array((val, (row, col)), shape=(1, 2))
b = np.asarray([4])
lsqr(A, b)
def test_well_conditioned_problems():
    # Test that the sparse lsqr solver returns the right solution
# on various problems with different random seeds.
# This is a non-regression test for a potential ZeroDivisionError
# raised when computing the `test2` & `test3` convergence conditions.
n = 10
A_sparse = scipy.sparse.eye_array(n, n)
A_dense = A_sparse.toarray()
with np.errstate(invalid='raise'):
for seed in range(30):
rng = np.random.RandomState(seed + 10)
beta = rng.rand(n)
            beta[beta == 0] = 0.00001  # ensure that no beta is zero
b = A_sparse @ beta[:, np.newaxis]
output = lsqr(A_sparse, b, show=show)
# Check that the termination condition corresponds to an approximate
# solution to Ax = b
assert_equal(output[1], 1)
solution = output[0]
# Check that we recover the ground truth solution
assert_allclose(solution, beta)
# Sanity check: compare to the dense array solver
reference_solution = np.linalg.solve(A_dense, b).ravel()
assert_allclose(solution, reference_solution)
def test_b_shapes():
# Test b being a scalar.
A = np.array([[1.0, 2.0]])
b = 3.0
x = lsqr(A, b)[0]
assert norm(A.dot(x) - b) == pytest.approx(0)
# Test b being a column vector.
A = np.eye(10)
b = np.ones((10, 1))
x = lsqr(A, b)[0]
assert norm(A.dot(x) - b.ravel()) == pytest.approx(0)
def test_initialization():
# Test the default setting is the same as zeros
b_copy = b.copy()
x_ref = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
x0 = np.zeros(x_ref[0].shape)
x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
assert_array_equal(b_copy, b)
assert_allclose(x_ref[0], x[0])
# Test warm-start with single iteration
x0 = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=1)[0]
x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
assert_allclose(x_ref[0], x[0])
assert_array_equal(b_copy, b)

View file

@ -0,0 +1,97 @@
import numpy as np
from numpy.linalg import norm
from numpy.testing import assert_equal, assert_allclose, assert_
from scipy.sparse.linalg._isolve import minres
from pytest import raises as assert_raises
def get_sample_problem():
# A random 10 x 10 symmetric matrix
rng = np.random.RandomState(1234)
matrix = rng.rand(10, 10)
matrix = matrix + matrix.T
# A random vector of length 10
vector = rng.rand(10)
return matrix, vector
def test_singular():
A, b = get_sample_problem()
    A[0, :] = 0
b[0] = 0
xp, info = minres(A, b)
assert_equal(info, 0)
assert norm(A @ xp - b) <= 1e-5 * norm(b)
def test_x0_is_used_by():
A, b = get_sample_problem()
# Random x0 to feed minres
rng = np.random.RandomState(12345)
x0 = rng.rand(10)
trace = []
def trace_iterates(xk):
trace.append(xk)
minres(A, b, x0=x0, callback=trace_iterates)
trace_with_x0 = trace
trace = []
minres(A, b, callback=trace_iterates)
assert_(not np.array_equal(trace_with_x0[0], trace[0]))
def test_shift():
A, b = get_sample_problem()
shift = 0.5
shifted_A = A - shift * np.eye(10)
x1, info1 = minres(A, b, shift=shift)
x2, info2 = minres(shifted_A, b)
assert_equal(info1, 0)
assert_allclose(x1, x2, rtol=1e-5)
def test_asymmetric_fail():
"""Asymmetric matrix should raise `ValueError` when check=True"""
A, b = get_sample_problem()
A[1, 2] = 1
A[2, 1] = 2
with assert_raises(ValueError):
xp, info = minres(A, b, check=True)
def test_minres_non_default_x0():
rng = np.random.RandomState(1234)
rtol = 1e-6
a = rng.randn(5, 5)
a = np.dot(a, a.T)
b = rng.randn(5)
c = rng.randn(5)
x = minres(a, b, x0=c, rtol=rtol)[0]
assert norm(a @ x - b) <= rtol * norm(b)
def test_minres_precond_non_default_x0():
rng = np.random.RandomState(12345)
rtol = 1e-6
a = rng.randn(5, 5)
a = np.dot(a, a.T)
b = rng.randn(5)
c = rng.randn(5)
m = rng.randn(5, 5)
m = np.dot(m, m.T)
x = minres(a, b, M=m, x0=c, rtol=rtol)[0]
assert norm(a @ x - b) <= rtol * norm(b)
def test_minres_precond_exact_x0():
rng = np.random.RandomState(1234)
rtol = 1e-6
a = np.eye(10)
b = np.ones(10)
c = np.ones(10)
m = rng.randn(10, 10)
m = np.dot(m, m.T)
x = minres(a, b, M=m, x0=c, rtol=rtol)[0]
assert norm(a @ x - b) <= rtol * norm(b)

View file

@ -0,0 +1,9 @@
import numpy as np
from pytest import raises as assert_raises
import scipy.sparse.linalg._isolve.utils as utils
def test_make_system_bad_shape():
assert_raises(ValueError,
utils.make_system, np.zeros((5,3)), None, np.zeros(4), np.zeros(4))

View file

@ -0,0 +1,179 @@
import numpy as np
from .iterative import _get_atol_rtol
from .utils import make_system
__all__ = ['tfqmr']
def tfqmr(A, b, x0=None, *, rtol=1e-5, atol=0., maxiter=None, M=None,
callback=None, show=False):
"""
Solve ``Ax = b`` with the Transpose-Free Quasi-Minimal Residual method.
Parameters
----------
A : {sparse array, ndarray, LinearOperator}
The real or complex N-by-N matrix of the linear system.
Alternatively, `A` can be a linear operator which can
produce ``Ax`` using, e.g.,
`scipy.sparse.linalg.LinearOperator`.
b : {ndarray}
Right hand side of the linear system. Has shape (N,) or (N,1).
x0 : {ndarray}
Starting guess for the solution.
rtol, atol : float, optional
Parameters for the convergence test. For convergence,
``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
The default is ``rtol=1e-5``, the default for ``atol`` is ``0.0``.
maxiter : int, optional
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
Default is ``min(10000, ndofs * 10)``, where ``ndofs = A.shape[0]``.
M : {sparse array, ndarray, LinearOperator}
Inverse of the preconditioner of A. M should approximate the
inverse of A and be easy to solve for (see Notes). Effective
preconditioning dramatically improves the rate of convergence,
which implies that fewer iterations are needed to reach a given
error tolerance. By default, no preconditioner is used.
callback : function, optional
User-supplied function to call after each iteration. It is called
as ``callback(xk)``, where ``xk`` is the current solution vector.
show : bool, optional
        Specify ``show = True`` to print convergence messages;
        ``show = False`` suppresses this output.
Default is `False`.
Returns
-------
x : ndarray
The converged solution.
info : int
Provides convergence information:
- 0 : successful exit
- >0 : convergence to tolerance not achieved, number of iterations
- <0 : illegal input or breakdown
Notes
-----
The Transpose-Free QMR algorithm is derived from the CGS algorithm.
    However, unlike CGS, the convergence curve of the TFQMR method is
    smoothed by computing a quasi-minimization of the residual norm. The
    implementation supports a left preconditioner, and the "residual norm"
    computed in the convergence criterion is actually an upper bound on the
    true residual norm ``||b - Axk||``.
References
----------
.. [1] R. W. Freund, A Transpose-Free Quasi-Minimal Residual Algorithm for
Non-Hermitian Linear Systems, SIAM J. Sci. Comput., 14(2), 470-482,
1993.
.. [2] Y. Saad, Iterative Methods for Sparse Linear Systems, 2nd edition,
SIAM, Philadelphia, 2003.
.. [3] C. T. Kelley, Iterative Methods for Linear and Nonlinear Equations,
number 16 in Frontiers in Applied Mathematics, SIAM, Philadelphia,
1995.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import tfqmr
>>> A = csc_array([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = tfqmr(A, b, atol=0.0)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
"""
# Check data type
dtype = A.dtype
if np.issubdtype(dtype, np.int64):
dtype = float
A = A.astype(dtype)
if np.issubdtype(b.dtype, np.int64):
b = b.astype(dtype)
A, M, x, b = make_system(A, M, x0, b)
# Check if the R.H.S is a zero vector
if np.linalg.norm(b) == 0.:
x = b.copy()
return (x, 0)
ndofs = A.shape[0]
if maxiter is None:
maxiter = min(10000, ndofs * 10)
if x0 is None:
r = b.copy()
else:
r = b - A.matvec(x)
u = r
w = r.copy()
# Take rstar as b - Ax0, that is rstar := r = b - Ax0 mathematically
rstar = r
v = M.matvec(A.matvec(r))
uhat = v
d = theta = eta = 0.
# at this point we know rstar == r, so rho is always real
rho = np.inner(rstar.conjugate(), r).real
rhoLast = rho
r0norm = np.sqrt(rho)
tau = r0norm
if r0norm == 0:
return (x, 0)
# we call this to get the right atol and raise errors as necessary
atol, _ = _get_atol_rtol('tfqmr', r0norm, atol, rtol)
for iter in range(maxiter):
even = iter % 2 == 0
if (even):
vtrstar = np.inner(rstar.conjugate(), v)
# Check breakdown
if vtrstar == 0.:
return (x, -1)
alpha = rho / vtrstar
uNext = u - alpha * v # [1]-(5.6)
w -= alpha * uhat # [1]-(5.8)
d = u + (theta**2 / alpha) * eta * d # [1]-(5.5)
# [1]-(5.2)
theta = np.linalg.norm(w) / tau
c = np.sqrt(1. / (1 + theta**2))
tau *= theta * c
# Calculate step and direction [1]-(5.4)
eta = (c**2) * alpha
z = M.matvec(d)
x += eta * z
if callback is not None:
callback(x)
# Convergence criterion
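        # tau * sqrt(iter+1) is an upper bound on the true residual norm
        # (see Notes in the docstring), so this check is conservative.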
if tau * np.sqrt(iter+1) < atol:
if (show):
print("TFQMR: Linear solve converged due to reach TOL "
f"iterations {iter+1}")
return (x, 0)
if (not even):
# [1]-(5.7)
rho = np.inner(rstar.conjugate(), w)
beta = rho / rhoLast
u = w + beta * u
v = beta * uhat + (beta**2) * v
uhat = M.matvec(A.matvec(u))
v += uhat
else:
uhat = M.matvec(A.matvec(uNext))
u = uNext
rhoLast = rho
if (show):
print("TFQMR: Linear solve not converged due to reach MAXIT "
f"iterations {iter+1}")
return (x, maxiter)

View file

@ -0,0 +1,121 @@
__docformat__ = "restructuredtext en"
__all__ = []
from numpy import asanyarray, asarray, array, zeros
from scipy.sparse.linalg._interface import aslinearoperator, LinearOperator, \
IdentityOperator
_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F',
('f','D'):'D', ('d','f'):'d', ('d','d'):'d',
('d','F'):'D', ('d','D'):'D', ('F','f'):'F',
('F','d'):'D', ('F','F'):'F', ('F','D'):'D',
('D','f'):'D', ('D','d'):'D', ('D','F'):'D',
('D','D'):'D'}
def coerce(x,y):
if x not in 'fdFD':
x = 'd'
if y not in 'fdFD':
y = 'd'
return _coerce_rules[x,y]
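# For example, coerce('f', 'D') -> 'D', while non-float typecodes fall back
# to double precision: coerce('l', 'f') -> 'd'.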
def id(x):
return x
def make_system(A, M, x0, b):
"""Make a linear system Ax=b
Parameters
----------
A : LinearOperator
sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, None}
preconditioner
sparse or dense matrix (or any valid input to aslinearoperator)
x0 : {array_like, str, None}
initial guess to iterative method.
``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``.
Default is `None`, which means using the zero initial guess.
b : array_like
right hand side
Returns
-------
(A, M, x, b)
A : LinearOperator
matrix of the linear system
M : LinearOperator
preconditioner
x : rank 1 ndarray
initial guess
b : rank 1 ndarray
right hand side
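    Examples
    --------
    A minimal sketch using this internal helper (the private import path is
    an implementation detail and may change between SciPy versions)::
        >>> import numpy as np
        >>> from scipy.sparse.linalg._isolve.utils import make_system
        >>> A, M, x, b = make_system(2 * np.eye(3), None, None, np.ones(3))
        >>> x                 # zero initial guess by default
        array([0., 0., 0.])
        >>> M.matvec(b)       # identity preconditioner by default
        array([1., 1., 1.])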
"""
A_ = A
A = aslinearoperator(A)
if A.shape[0] != A.shape[1]:
        raise ValueError(f'expected square matrix, but got shape={A.shape}')
N = A.shape[0]
b = asanyarray(b)
if not (b.shape == (N,1) or b.shape == (N,)):
raise ValueError(f'shapes of A {A.shape} and b {b.shape} are '
'incompatible')
if b.dtype.char not in 'fdFD':
b = b.astype('d') # upcast non-FP types to double
if hasattr(A,'dtype'):
xtype = A.dtype.char
else:
xtype = A.matvec(b).dtype.char
xtype = coerce(xtype, b.dtype.char)
b = asarray(b,dtype=xtype) # make b the same type as x
b = b.ravel()
# process preconditioner
if M is None:
if hasattr(A_,'psolve'):
psolve = A_.psolve
else:
psolve = id
if hasattr(A_,'rpsolve'):
rpsolve = A_.rpsolve
else:
rpsolve = id
if psolve is id and rpsolve is id:
M = IdentityOperator(shape=A.shape, dtype=A.dtype)
else:
M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
dtype=A.dtype)
else:
M = aslinearoperator(M)
if A.shape != M.shape:
raise ValueError('matrix and preconditioner have different shapes')
# set initial guess
if x0 is None:
x = zeros(N, dtype=xtype)
elif isinstance(x0, str):
        if x0 == 'Mb':  # use nonzero initial guess ``M @ b``
            bCopy = b.copy()
            x = M.matvec(bCopy)
        else:
            raise ValueError(f"unsupported initial guess string {x0!r}; "
                             "the only recognized value is 'Mb'")
else:
x = array(x0, dtype=xtype)
if not (x.shape == (N, 1) or x.shape == (N,)):
raise ValueError(f'shapes of A {A.shape} and '
f'x0 {x.shape} are incompatible')
x = x.ravel()
return A, M, x, b

View file

@ -0,0 +1,940 @@
"""
Sparse matrix functions
"""
#
# Authors: Travis Oliphant, March 2002
# Anthony Scopatz, August 2012 (Sparse Updates)
# Jake Vanderplas, August 2012 (Sparse Updates)
#
__all__ = ['expm', 'inv', 'matrix_power']
import numpy as np
from scipy.linalg._basic import solve, solve_triangular
from scipy.sparse._base import issparse
from scipy.sparse.linalg import spsolve
from scipy.sparse._sputils import is_pydata_spmatrix, isintlike
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg._interface import LinearOperator
from scipy.sparse._construct import eye_array
from ._expm_multiply import _ident_like, _exact_1_norm as _onenorm
UPPER_TRIANGULAR = 'upper_triangular'
def inv(A):
"""
    Compute the inverse of a sparse array
Parameters
----------
    A : (M, M) sparse array
square matrix to be inverted
Returns
-------
    Ainv : (M, M) sparse array
inverse of `A`
Notes
-----
This computes the sparse inverse of `A`. If the inverse of `A` is expected
to be non-sparse, it will likely be faster to convert `A` to dense and use
`scipy.linalg.inv`.
Examples
--------
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import inv
>>> A = csc_array([[1., 0.], [1., 2.]])
>>> Ainv = inv(A)
>>> Ainv
<Compressed Sparse Column sparse array of dtype 'float64'
with 3 stored elements and shape (2, 2)>
>>> A.dot(Ainv)
<Compressed Sparse Column sparse array of dtype 'float64'
with 2 stored elements and shape (2, 2)>
>>> A.dot(Ainv).toarray()
array([[ 1., 0.],
[ 0., 1.]])
.. versionadded:: 0.12.0
"""
# Check input
if not (issparse(A) or is_pydata_spmatrix(A)):
        raise TypeError('Input must be a sparse array')
# Use sparse direct solver to solve "AX = I" accurately
I = _ident_like(A)
Ainv = spsolve(A, I)
return Ainv
def _onenorm_matrix_power_nnm(A, p):
"""
Compute the 1-norm of a non-negative integer power of a non-negative matrix.
Parameters
----------
    A : a square ndarray, matrix, or sparse array
Input matrix with non-negative entries.
p : non-negative integer
The power to which the matrix is to be raised.
Returns
-------
out : float
The 1-norm of the matrix power p of A.
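    Examples
    --------
    A quick dense check (illustrative)::
        >>> import numpy as np
        >>> A = np.array([[1., 2.], [3., 4.]])
        >>> float(_onenorm_matrix_power_nnm(A, 2))  # == np.linalg.norm(A @ A, 1)
        32.0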
"""
# Check input
if int(p) != p or p < 0:
raise ValueError('expected non-negative integer p')
p = int(p)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# Explicitly make a column vector so that this works when A is a
# numpy matrix (in addition to ndarray and sparse arrays).
v = np.ones((A.shape[0], 1), dtype=float)
M = A.T
for i in range(p):
v = M.dot(v)
return np.max(v)
def _is_upper_triangular(A):
# This function could possibly be of wider interest.
if issparse(A):
lower_part = scipy.sparse.tril(A, -1)
# Check structural upper triangularity,
# then coincidental upper triangularity if needed.
return lower_part.nnz == 0 or lower_part.count_nonzero() == 0
elif is_pydata_spmatrix(A):
import sparse
lower_part = sparse.tril(A, -1)
return lower_part.nnz == 0
else:
return not np.tril(A, -1).any()
def _smart_matrix_product(A, B, alpha=None, structure=None):
"""
A matrix product that knows about sparse and structured matrices.
Parameters
----------
A : 2d ndarray
First matrix.
B : 2d ndarray
Second matrix.
alpha : float
The matrix product will be scaled by this constant.
structure : str, optional
A string describing the structure of both matrices `A` and `B`.
Only `upper_triangular` is currently supported.
Returns
-------
M : 2d ndarray
Matrix product of A and B.
"""
if len(A.shape) != 2:
raise ValueError('expected A to be a rectangular matrix')
if len(B.shape) != 2:
raise ValueError('expected B to be a rectangular matrix')
f = None
if structure == UPPER_TRIANGULAR:
if (not issparse(A) and not issparse(B)
and not is_pydata_spmatrix(A) and not is_pydata_spmatrix(B)):
f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
if f is not None:
if alpha is None:
alpha = 1.
out = f(alpha, A, B)
else:
if alpha is None:
out = A.dot(B)
else:
out = alpha * A.dot(B)
return out
class MatrixPowerOperator(LinearOperator):
def __init__(self, A, p, structure=None):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0:
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self._structure = structure
self.dtype = A.dtype
self.ndim = A.ndim
self.shape = A.shape
def _matvec(self, x):
for i in range(self._p):
x = self._A.dot(x)
return x
def _rmatvec(self, x):
A_T = self._A.T
x = x.ravel()
for i in range(self._p):
x = A_T.dot(x)
return x
def _matmat(self, X):
for i in range(self._p):
X = _smart_matrix_product(self._A, X, structure=self._structure)
return X
@property
def T(self):
return MatrixPowerOperator(self._A.T, self._p)
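# Illustrative: MatrixPowerOperator(A, 3) applies A three times per matvec
# without ever forming A @ A @ A explicitly, e.g.
#     MatrixPowerOperator(2 * np.eye(2), 3).matvec(np.ones(2))  # -> [8., 8.]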
class ProductOperator(LinearOperator):
"""
For now, this is limited to products of multiple square matrices.
"""
def __init__(self, *args, **kwargs):
self._structure = kwargs.get('structure', None)
for A in args:
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError(
'For now, the ProductOperator implementation is '
'limited to the product of multiple square matrices.')
if args:
n = args[0].shape[0]
for A in args:
for d in A.shape:
if d != n:
raise ValueError(
'The square matrices of the ProductOperator '
'must all have the same shape.')
self.shape = (n, n)
self.ndim = len(self.shape)
self.dtype = np.result_type(*[x.dtype for x in args])
self._operator_sequence = args
def _matvec(self, x):
for A in reversed(self._operator_sequence):
x = A.dot(x)
return x
def _rmatvec(self, x):
x = x.ravel()
for A in self._operator_sequence:
x = A.T.dot(x)
return x
def _matmat(self, X):
for A in reversed(self._operator_sequence):
X = _smart_matrix_product(A, X, structure=self._structure)
return X
@property
def T(self):
T_args = [A.T for A in reversed(self._operator_sequence)]
return ProductOperator(*T_args)
def _onenormest_matrix_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of A^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
        An underestimate of the 1-norm of the matrix power ``A^p``.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
MatrixPowerOperator(A, p, structure=structure))
def _onenormest_product(operator_seq,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of the matrix product of the args.
Parameters
----------
operator_seq : linear operator sequence
Matrices whose 1-norm of product is to be computed.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
structure : str, optional
A string describing the structure of all operators.
Only `upper_triangular` is currently supported.
Returns
-------
est : float
        An underestimate of the 1-norm of the matrix product.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
ProductOperator(*operator_seq, structure=structure))
class _ExpmPadeHelper:
"""
Help lazily evaluate a matrix exponential.
The idea is to not do more work than we need for high expm precision,
so we lazily compute matrix powers and store or precompute
other properties of the matrix.
"""
def __init__(self, A, structure=None, use_exact_onenorm=False):
"""
Initialize the object.
Parameters
----------
A : a dense or sparse square numpy matrix or ndarray
The matrix to be exponentiated.
structure : str, optional
A string describing the structure of matrix `A`.
Only `upper_triangular` is currently supported.
use_exact_onenorm : bool, optional
If True then only the exact one-norm of matrix powers and products
will be used. Otherwise, the one-norm of powers and products
may initially be estimated.
"""
self.A = A
self._A2 = None
self._A4 = None
self._A6 = None
self._A8 = None
self._A10 = None
self._d4_exact = None
self._d6_exact = None
self._d8_exact = None
self._d10_exact = None
self._d4_approx = None
self._d6_approx = None
self._d8_approx = None
self._d10_approx = None
self.ident = _ident_like(A)
self.structure = structure
self.use_exact_onenorm = use_exact_onenorm
@property
def A2(self):
if self._A2 is None:
self._A2 = _smart_matrix_product(
self.A, self.A, structure=self.structure)
return self._A2
@property
def A4(self):
if self._A4 is None:
self._A4 = _smart_matrix_product(
self.A2, self.A2, structure=self.structure)
return self._A4
@property
def A6(self):
if self._A6 is None:
self._A6 = _smart_matrix_product(
self.A4, self.A2, structure=self.structure)
return self._A6
@property
def A8(self):
if self._A8 is None:
self._A8 = _smart_matrix_product(
self.A6, self.A2, structure=self.structure)
return self._A8
@property
def A10(self):
if self._A10 is None:
self._A10 = _smart_matrix_product(
self.A4, self.A6, structure=self.structure)
return self._A10
@property
def d4_tight(self):
if self._d4_exact is None:
self._d4_exact = _onenorm(self.A4)**(1/4.)
return self._d4_exact
@property
def d6_tight(self):
if self._d6_exact is None:
self._d6_exact = _onenorm(self.A6)**(1/6.)
return self._d6_exact
@property
def d8_tight(self):
if self._d8_exact is None:
self._d8_exact = _onenorm(self.A8)**(1/8.)
return self._d8_exact
@property
def d10_tight(self):
if self._d10_exact is None:
self._d10_exact = _onenorm(self.A10)**(1/10.)
return self._d10_exact
@property
def d4_loose(self):
if self.use_exact_onenorm:
return self.d4_tight
if self._d4_exact is not None:
return self._d4_exact
else:
if self._d4_approx is None:
self._d4_approx = _onenormest_matrix_power(self.A2, 2,
structure=self.structure)**(1/4.)
return self._d4_approx
@property
def d6_loose(self):
if self.use_exact_onenorm:
return self.d6_tight
if self._d6_exact is not None:
return self._d6_exact
else:
if self._d6_approx is None:
self._d6_approx = _onenormest_matrix_power(self.A2, 3,
structure=self.structure)**(1/6.)
return self._d6_approx
@property
def d8_loose(self):
if self.use_exact_onenorm:
return self.d8_tight
if self._d8_exact is not None:
return self._d8_exact
else:
if self._d8_approx is None:
self._d8_approx = _onenormest_matrix_power(self.A4, 2,
structure=self.structure)**(1/8.)
return self._d8_approx
@property
def d10_loose(self):
if self.use_exact_onenorm:
return self.d10_tight
if self._d10_exact is not None:
return self._d10_exact
else:
if self._d10_approx is None:
self._d10_approx = _onenormest_product((self.A4, self.A6),
structure=self.structure)**(1/10.)
return self._d10_approx
def pade3(self):
b = (120., 60., 12., 1.)
U = _smart_matrix_product(self.A,
b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[2]*self.A2 + b[0]*self.ident
return U, V
def pade5(self):
b = (30240., 15120., 3360., 420., 30., 1.)
U = _smart_matrix_product(self.A,
b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
return U, V
def pade7(self):
b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
U = _smart_matrix_product(self.A,
b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
return U, V
def pade9(self):
b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
2162160., 110880., 3960., 90., 1.)
U = _smart_matrix_product(self.A,
(b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
b[3]*self.A2 + b[1]*self.ident),
structure=self.structure)
V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
b[2]*self.A2 + b[0]*self.ident)
return U, V
def pade13_scaled(self, s):
b = (64764752532480000., 32382376266240000., 7771770303897600.,
1187353796428800., 129060195264000., 10559470521600.,
670442572800., 33522128640., 1323241920., 40840800., 960960.,
16380., 182., 1.)
B = self.A * 2**-s
B2 = self.A2 * 2**(-2*s)
B4 = self.A4 * 2**(-4*s)
B6 = self.A6 * 2**(-6*s)
U2 = _smart_matrix_product(B6,
b[13]*B6 + b[11]*B4 + b[9]*B2,
structure=self.structure)
U = _smart_matrix_product(B,
(U2 + b[7]*B6 + b[5]*B4 +
b[3]*B2 + b[1]*self.ident),
structure=self.structure)
V2 = _smart_matrix_product(B6,
b[12]*B6 + b[10]*B4 + b[8]*B2,
structure=self.structure)
V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
return U, V
def expm(A):
"""
Compute the matrix exponential using Pade approximation.
Parameters
----------
A : (M,M) array_like or sparse array
2D Array or Matrix (sparse or dense) to be exponentiated
Returns
-------
expA : (M,M) ndarray
Matrix exponential of `A`
Notes
-----
This is algorithm (6.1) which is a simplification of algorithm (5.1).
.. versionadded:: 0.12.0
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
"A New Scaling and Squaring Algorithm for the Matrix Exponential."
SIAM Journal on Matrix Analysis and Applications.
31 (3). pp. 970-989. ISSN 1095-7162
Examples
--------
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import expm
>>> A = csc_array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
>>> A.toarray()
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]], dtype=int64)
>>> Aexp = expm(A)
>>> Aexp
<Compressed Sparse Column sparse array of dtype 'float64'
with 3 stored elements and shape (3, 3)>
>>> Aexp.toarray()
array([[ 2.71828183, 0. , 0. ],
[ 0. , 7.3890561 , 0. ],
[ 0. , 0. , 20.08553692]])
"""
return _expm(A, use_exact_onenorm='auto')
def _expm(A, use_exact_onenorm):
# Core of expm, separated to allow testing exact and approximate
# algorithms.
# Avoid indiscriminate asarray() to allow sparse or other strange arrays.
if isinstance(A, list | tuple | np.matrix):
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
# gracefully handle size-0 input,
# carefully handling sparse scenario
if A.shape == (0, 0):
out = np.zeros([0, 0], dtype=A.dtype)
if issparse(A) or is_pydata_spmatrix(A):
return A.__class__(out)
return out
# Trivial case
if A.shape == (1, 1):
out = [[np.exp(A[0, 0])]]
# Avoid indiscriminate casting to ndarray to
# allow for sparse or other strange arrays
if issparse(A) or is_pydata_spmatrix(A):
return A.__class__(out)
return np.array(out)
# Ensure input is of float type, to avoid integer overflows etc.
if ((isinstance(A, np.ndarray) or issparse(A) or is_pydata_spmatrix(A))
and not np.issubdtype(A.dtype, np.inexact)):
A = A.astype(float)
# Detect upper triangularity.
structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None
if use_exact_onenorm == "auto":
# Hardcode a matrix order threshold for exact vs. estimated one-norms.
use_exact_onenorm = A.shape[0] < 200
# Track functions of A to help compute the matrix exponential.
h = _ExpmPadeHelper(
A, structure=structure, use_exact_onenorm=use_exact_onenorm)
# Try Pade order 3.
eta_1 = max(h.d4_loose, h.d6_loose)
if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
U, V = h.pade3()
return _solve_P_Q(U, V, structure=structure)
# Try Pade order 5.
eta_2 = max(h.d4_tight, h.d6_loose)
if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
U, V = h.pade5()
return _solve_P_Q(U, V, structure=structure)
# Try Pade orders 7 and 9.
eta_3 = max(h.d6_tight, h.d8_loose)
if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
U, V = h.pade7()
return _solve_P_Q(U, V, structure=structure)
if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
U, V = h.pade9()
return _solve_P_Q(U, V, structure=structure)
# Use Pade order 13.
eta_4 = max(h.d8_loose, h.d10_loose)
eta_5 = min(eta_3, eta_4)
theta_13 = 4.25
# Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13
if eta_5 == 0:
# Nilpotent special case
s = 0
else:
s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
s = s + _ell(2**-s * h.A, 13)
U, V = h.pade13_scaled(s)
X = _solve_P_Q(U, V, structure=structure)
if structure == UPPER_TRIANGULAR:
# Invoke Code Fragment 2.1.
X = _fragment_2_1(X, h.A, s)
else:
# X = r_13(A)^(2^s) by repeated squaring.
for i in range(s):
X = X.dot(X)
return X
def _solve_P_Q(U, V, structure=None):
"""
A helper function for expm_2009.
Parameters
----------
U : ndarray
Pade numerator.
V : ndarray
Pade denominator.
structure : str, optional
A string describing the structure of both matrices `U` and `V`.
Only `upper_triangular` is currently supported.
Notes
-----
The `structure` argument is inspired by similar args
for theano and cvxopt functions.
"""
P = U + V
Q = -U + V
if issparse(U) or is_pydata_spmatrix(U):
return spsolve(Q, P)
elif structure is None:
return solve(Q, P)
elif structure == UPPER_TRIANGULAR:
return solve_triangular(Q, P)
else:
raise ValueError('unsupported matrix structure: ' + str(structure))
def _exp_sinch(a, x):
"""
Stably evaluate exp(a)*sinh(x)/x
Notes
-----
The strategy of falling back to a sixth order Taylor expansion
was suggested by the Spallation Neutron Source docs
    which were found on the internet by a Google search.
http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
The details of the cutoff point and the Horner-like evaluation
was picked without reference to anything in particular.
Note that sinch is not currently implemented in scipy.special,
whereas the "engineer's" definition of sinc is implemented.
The implementation of sinc involves a scaling factor of pi
that distinguishes it from the "mathematician's" version of sinc.
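    Examples
    --------
    Both branches agree with the direct formula where it is stable
    (illustrative)::
        >>> float(_exp_sinch(0.0, 1.0))   # direct branch: sinh(1)/1
        1.1752011936438014
        >>> float(_exp_sinch(0.0, 1e-8))  # Taylor branch: sinh(x)/x -> 1
        1.0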
"""
# If x is small then use sixth order Taylor expansion.
# How small is small? I am using the point where the relative error
# of the approximation is less than 1e-14.
# If x is large then directly evaluate sinh(x) / x.
if abs(x) < 0.0135:
x2 = x*x
return np.exp(a) * (1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.))))
else:
return (np.exp(a + x) - np.exp(a - x)) / (2*x)
def _eq_10_42(lam_1, lam_2, t_12):
"""
Equation (10.42) of Functions of Matrices: Theory and Computation.
Notes
-----
This is a helper function for _fragment_2_1 of expm_2009.
Equation (10.42) is on page 251 in the section on Schur algorithms.
In particular, section 10.4.3 explains the Schur-Parlett algorithm.
    expm([[lam_1, t_12], [0, lam_2]])
    =
    [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
    [0, exp(lam_2)]]
"""
    # The plain formula t_12 * (exp(lam_2) - exp(lam_1)) / (lam_2 - lam_1)
# apparently suffers from cancellation, according to Higham's textbook.
# A nice implementation of sinch, defined as sinh(x)/x,
# will apparently work around the cancellation.
a = 0.5 * (lam_1 + lam_2)
b = 0.5 * (lam_1 - lam_2)
return t_12 * _exp_sinch(a, b)
def _fragment_2_1(X, T, s):
"""
A helper function for expm_2009.
Notes
-----
The argument X is modified in-place, but this modification is not the same
as the returned value of the function.
This function also takes pains to do things in ways that are compatible
with sparse arrays, for example by avoiding fancy indexing
and by using methods of the matrices whenever possible instead of
using functions of the numpy or scipy libraries themselves.
"""
# Form X = r_m(2^-s T)
# Replace diag(X) by exp(2^-s diag(T)).
n = X.shape[0]
diag_T = np.ravel(T.diagonal().copy())
# Replace diag(X) by exp(2^-s diag(T)).
scale = 2 ** -s
exp_diag = np.exp(scale * diag_T)
for k in range(n):
X[k, k] = exp_diag[k]
for i in range(s-1, -1, -1):
X = X.dot(X)
# Replace diag(X) by exp(2^-i diag(T)).
scale = 2 ** -i
exp_diag = np.exp(scale * diag_T)
for k in range(n):
X[k, k] = exp_diag[k]
# Replace (first) superdiagonal of X by explicit formula
# for superdiagonal of exp(2^-i T) from Eq (10.42) of
# the author's 2008 textbook
# Functions of Matrices: Theory and Computation.
for k in range(n-1):
lam_1 = scale * diag_T[k]
lam_2 = scale * diag_T[k+1]
t_12 = scale * T[k, k+1]
value = _eq_10_42(lam_1, lam_2, t_12)
X[k, k+1] = value
# Return the updated X matrix.
return X
def _ell(A, m):
"""
A helper function for expm_2009.
Parameters
----------
A : linear operator
A linear operator whose norm of power we care about.
m : int
The power of the linear operator
Returns
-------
value : int
A value related to a bound.
"""
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
# They are coefficients of terms of a generating function series expansion.
c_i = {3: 100800.,
5: 10059033600.,
7: 4487938430976000.,
9: 5914384781877411840000.,
13: 113250775606021113483283660800000000.
}
abs_c_recip = c_i[m]
# This is explained after Eq. (1.2) of the 2009 expm paper.
# It is the "unit roundoff" of IEEE double precision arithmetic.
u = 2**-53
# Compute the one-norm of matrix power p of abs(A).
A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2*m + 1)
# Treat zero norm as a special case.
if not A_abs_onenorm:
return 0
alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
log2_alpha_div_u = np.log2(alpha/u)
value = int(np.ceil(log2_alpha_div_u / (2 * m)))
return max(value, 0)
def matrix_power(A, power):
"""
Raise a square matrix to the integer power, `power`.
For non-negative integers, ``A**power`` is computed using repeated
matrix multiplications. Negative integers are not supported.
Parameters
----------
A : (M, M) square sparse array or matrix
sparse array that will be raised to power `power`
power : int
Exponent used to raise sparse array `A`
Returns
-------
A**power : (M, M) sparse array or matrix
The output matrix will be the same shape as A, and will preserve
the class of A, but the format of the output may be changed.
Notes
-----
This uses a recursive implementation of the matrix power. For computing
the matrix power using a reasonably large `power`, this may be less efficient
than computing the product directly, using A @ A @ ... @ A.
This is contingent upon the number of nonzero entries in the matrix.
.. versionadded:: 1.12.0
Examples
--------
>>> from scipy import sparse
>>> A = sparse.csc_array([[0,1,0],[1,0,1],[0,1,0]])
>>> A.todense()
array([[0, 1, 0],
[1, 0, 1],
[0, 1, 0]])
>>> (A @ A).todense()
array([[1, 0, 1],
[0, 2, 0],
[1, 0, 1]])
>>> A2 = sparse.linalg.matrix_power(A, 2)
>>> A2.todense()
array([[1, 0, 1],
[0, 2, 0],
[1, 0, 1]])
>>> A4 = sparse.linalg.matrix_power(A, 4)
>>> A4.todense()
array([[2, 0, 2],
[0, 4, 0],
[2, 0, 2]])
"""
M, N = A.shape
if M != N:
raise TypeError('sparse matrix is not square')
if isintlike(power):
power = int(power)
if power < 0:
raise ValueError('exponent must be >= 0')
if power == 0:
return eye_array(M, dtype=A.dtype)
if power == 1:
return A.copy()
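# Exponentiation by squaring: compute A**(power // 2) once, square it,
# and multiply in one extra factor of A when the power is odd.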
tmp = matrix_power(A, power // 2)
if power % 2:
return A @ tmp @ tmp
else:
return tmp @ tmp
else:
raise ValueError("exponent must be an integer")

View file

@ -0,0 +1,195 @@
"""Sparse matrix norms.
"""
import numpy as np
from scipy.sparse import issparse
from scipy.sparse.linalg import svds
from scipy.sparse._sputils import convert_pydata_sparse_to_scipy
import scipy.sparse as sp
from numpy import sqrt, abs
__all__ = ['norm']
def _sparse_frobenius_norm(x):
data = sp._sputils._todata(x)
return np.linalg.norm(data)
def norm(x, ord=None, axis=None):
"""
Norm of a sparse matrix
This function is able to return one of seven different matrix norms,
depending on the value of the ``ord`` parameter.
Parameters
----------
x : a sparse array
Input sparse array.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
Returns
-------
n : float or ndarray
Notes
-----
Some of the ord values are not implemented because some associated
functions, like _multi_svd_norm, are not yet available for sparse arrays.
This docstring is adapted from numpy.linalg.norm.
https://github.com/numpy/numpy/blob/main/numpy/linalg/linalg.py
The following norms can be calculated:
===== ============================
ord norm for sparse arrays
===== ============================
None Frobenius norm
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
0 abs(x).sum(axis=axis)
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 Spectral norm (the largest singular value)
-2 Not implemented
other Not implemented
===== ============================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from scipy.sparse import csr_array, diags_array
>>> import numpy as np
>>> from scipy.sparse.linalg import norm
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> b = csr_array(b)
>>> norm(b)
7.745966692414834
>>> norm(b, 'fro')
7.745966692414834
>>> norm(b, np.inf)
9
>>> norm(b, -np.inf)
2
>>> norm(b, 1)
7
>>> norm(b, -1)
6
The matrix 2-norm or the spectral norm is the largest singular
value, computed approximately and with limitations.
>>> b = diags_array([-1, 1], offsets=[0, 1], shape=(9, 10))
>>> norm(b, 2)
1.9753...
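Vector norms along a single axis can be computed as well; for instance,
column-wise Euclidean norms of the array from above:
>>> c = csr_array(a.reshape((3, 3)))
>>> norm(c, axis=0)
array([4.58257569, 4.24264069, 4.58257569])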
"""
x = convert_pydata_sparse_to_scipy(x, target_format="csr")
if not issparse(x):
raise TypeError("input is not sparse. use numpy.linalg.norm")
# Check the default case first and handle it immediately.
if axis is None and ord in (None, 'fro', 'f'):
return _sparse_frobenius_norm(x)
# Some norms require functions that are not implemented for all types.
x = x.tocsr()
if axis is None:
axis = tuple(range(x.ndim))
elif not isinstance(axis, tuple):
msg = "'axis' must be None, an integer or a tuple of integers"
try:
int_axis = int(axis)
except TypeError as e:
raise TypeError(msg) from e
if axis != int_axis:
raise TypeError(msg)
axis = (int_axis,)
nd = x.ndim
if len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
message = f'Invalid axis {axis!r} for an array with shape {x.shape!r}'
raise ValueError(message)
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
# Only solver="lobpcg" supports all numpy dtypes
_, s, _ = svds(x, k=1, solver="lobpcg")
return s[0]
elif ord == -2:
raise NotImplementedError
#return _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
return abs(x).sum(axis=row_axis).max()
elif ord == np.inf:
return abs(x).sum(axis=col_axis).max()
elif ord == -1:
return abs(x).sum(axis=row_axis).min()
elif ord == -np.inf:
return abs(x).sum(axis=col_axis).min()
elif ord in (None, 'f', 'fro'):
# The axis order does not matter for this norm.
return _sparse_frobenius_norm(x)
else:
raise ValueError("Invalid norm order for matrices.")
elif len(axis) == 1:
a, = axis
if not (-nd <= a < nd):
message = f'Invalid axis {axis!r} for an array with shape {x.shape!r}'
raise ValueError(message)
if ord == np.inf:
M = abs(x).max(axis=a)
elif ord == -np.inf:
M = abs(x).min(axis=a)
elif ord == 0:
# Zero norm
M = (x != 0).sum(axis=a)
elif ord == 1:
# special case for speedup
M = abs(x).sum(axis=a)
elif ord in (2, None):
M = sqrt(abs(x).power(2).sum(axis=a))
else:
try:
ord + 1
except TypeError as e:
raise ValueError('Invalid norm order for vectors.') from e
M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord)
if hasattr(M, 'toarray'):
return M.toarray().ravel()
elif hasattr(M, 'A'):
return M.A.ravel()
else:
return M.ravel()
else:
raise ValueError("Improper number of dimensions to norm.")

View file

@ -0,0 +1,467 @@
"""Sparse block 1-norm estimator.
"""
import numpy as np
from scipy.sparse.linalg import aslinearoperator
__all__ = ['onenormest']
def onenormest(A, t=2, itmax=5, compute_v=False, compute_w=False):
"""
Compute a lower bound of the 1-norm of a sparse array.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can be transposed and that can
produce matrix products.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse array.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
Notes
-----
This is algorithm 2.4 of [1]_.
In [2]_ it is described as follows.
"This algorithm typically requires the evaluation of
about 4t matrix-vector products and almost invariably
produces a norm estimate (which is, in fact, a lower
bound on the norm) correct to within a factor 3."
.. versionadded:: 0.13.0
References
----------
.. [1] Nicholas J. Higham and Francoise Tisseur (2000),
"A Block Algorithm for Matrix 1-Norm Estimation,
with an Application to 1-Norm Pseudospectra."
SIAM J. Matrix Anal. Appl. Vol. 21, No. 4, pp. 1185-1201.
.. [2] Awad H. Al-Mohy and Nicholas J. Higham (2009),
"A new scaling and squaring algorithm for the matrix exponential."
SIAM J. Matrix Anal. Appl. Vol. 31, No. 3, pp. 970-989.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import onenormest
>>> A = csc_array([[1., 0., 0.], [5., 8., 2.], [0., -1., 0.]], dtype=float)
>>> A.toarray()
array([[ 1., 0., 0.],
[ 5., 8., 2.],
[ 0., -1., 0.]])
>>> onenormest(A)
9.0
>>> np.linalg.norm(A.toarray(), ord=1)
9.0
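When ``t`` is at least the order of ``A``, the estimate is computed
exactly, so the optional certificate vectors are deterministic in this
small illustration:
>>> est, v, w = onenormest(A, t=3, compute_v=True, compute_w=True)
>>> v
array([0., 1., 0.])
>>> w
array([ 0.,  8., -1.])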
"""
# Check the input.
A = aslinearoperator(A)
if A.shape[0] != A.shape[1]:
raise ValueError('expected the operator to act like a square matrix')
# If the operator size is small compared to t,
# then it is easier to compute the exact norm.
# Otherwise estimate the norm.
n = A.shape[1]
if t >= n:
A_explicit = np.asarray(aslinearoperator(A).matmat(np.identity(n)))
if A_explicit.shape != (n, n):
raise Exception('internal error: ',
'unexpected shape ' + str(A_explicit.shape))
col_abs_sums = abs(A_explicit).sum(axis=0)
if col_abs_sums.shape != (n, ):
raise Exception('internal error: ',
'unexpected shape ' + str(col_abs_sums.shape))
argmax_j = np.argmax(col_abs_sums)
v = elementary_vector(n, argmax_j)
w = A_explicit[:, argmax_j]
est = col_abs_sums[argmax_j]
else:
est, v, w, nmults, nresamples = _onenormest_core(A, A.H, t, itmax)
# Report the norm estimate along with some certificates of the estimate.
if compute_v or compute_w:
result = (est,)
if compute_v:
result += (v,)
if compute_w:
result += (w,)
return result
else:
return est
def _blocked_elementwise(func):
"""
Decorator for an elementwise function, to apply it blockwise along
first dimension, to avoid excessive memory usage in temporaries.
"""
block_size = 2**20
def wrapper(x):
if x.shape[0] < block_size:
return func(x)
else:
y0 = func(x[:block_size])
y = np.zeros((x.shape[0],) + y0.shape[1:], dtype=y0.dtype)
y[:block_size] = y0
del y0
for j in range(block_size, x.shape[0], block_size):
y[j:j+block_size] = func(x[j:j+block_size])
return y
return wrapper
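# A hedged illustration of the decorator (the wrapped function
# ``_demo_clip`` is ours, purely for exposition): the input is processed
# in 2**20-row blocks, so only one block-sized temporary is alive at a time.
@_blocked_elementwise
def _demo_clip(x):
    # elementwise and rowwise-independent, so blockwise application is exact
    return np.clip(x, -1.0, 1.0)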
@_blocked_elementwise
def sign_round_up(X):
"""
This should do the right thing for both real and complex matrices.
From Higham and Tisseur:
"Everything in this section remains valid for complex matrices
provided that sign(A) is redefined as the matrix (aij / |aij|)
(and sign(0) = 1) transposes are replaced by conjugate transposes."
"""
Y = X.copy()
Y[Y == 0] = 1
Y /= np.abs(Y)
return Y
@_blocked_elementwise
def _max_abs_axis1(X):
return np.max(np.abs(X), axis=1)
def _sum_abs_axis0(X):
block_size = 2**20
r = None
for j in range(0, X.shape[0], block_size):
y = np.sum(np.abs(X[j:j+block_size]), axis=0)
if r is None:
r = y
else:
r += y
return r
def elementary_vector(n, i):
v = np.zeros(n, dtype=float)
v[i] = 1
return v
def vectors_are_parallel(v, w):
# Columns are considered parallel when they are equal or negative.
# Entries are required to be in {-1, 1},
# which guarantees that the magnitudes of the vectors are identical.
if v.ndim != 1 or v.shape != w.shape:
raise ValueError('expected conformant vectors with entries in {-1,1}')
n = v.shape[0]
return np.dot(v, w) == n
def every_col_of_X_is_parallel_to_a_col_of_Y(X, Y):
for v in X.T:
if not any(vectors_are_parallel(v, w) for w in Y.T):
return False
return True
def column_needs_resampling(i, X, Y=None):
# column i of X needs resampling if either
# it is parallel to a previous column of X or
# it is parallel to a column of Y
n, t = X.shape
v = X[:, i]
if any(vectors_are_parallel(v, X[:, j]) for j in range(i)):
return True
if Y is not None:
if any(vectors_are_parallel(v, w) for w in Y.T):
return True
return False
def resample_column(i, X):
X[:, i] = np.random.randint(0, 2, size=X.shape[0])*2 - 1
def less_than_or_close(a, b):
return np.allclose(a, b) or (a < b)
def _algorithm_2_2(A, AT, t):
"""
This is Algorithm 2.2.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can produce matrix products.
AT : ndarray or other linear operator
The transpose of A.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Returns
-------
g : sequence
A non-negative decreasing vector
such that g[j] is a lower bound for the 1-norm
of the column of A of jth largest 1-norm.
The first entry of this vector is therefore a lower bound
on the 1-norm of the linear operator A.
This sequence has length t.
ind : sequence
The ith entry of ind is the index of the column A whose 1-norm
is given by g[i].
This sequence of indices has length t, and its entries are
chosen from range(n), possibly with repetition,
where n is the order of the operator A.
Notes
-----
This algorithm is mainly for testing.
It uses the 'ind' array in a way that is similar to
its usage in algorithm 2.4. This algorithm 2.2 may be easier to test,
so it gives a chance of uncovering bugs related to indexing
which could have propagated less noticeably to algorithm 2.4.
"""
A_linear_operator = aslinearoperator(A)
AT_linear_operator = aslinearoperator(AT)
n = A_linear_operator.shape[0]
# Initialize the X block with columns of unit 1-norm.
X = np.ones((n, t))
if t > 1:
X[:, 1:] = np.random.randint(0, 2, size=(n, t-1))*2 - 1
X /= float(n)
# Iteratively improve the lower bounds.
# Track extra things, to assert invariants for debugging.
g_prev = None
h_prev = None
k = 1
ind = range(t)
while True:
Y = np.asarray(A_linear_operator.matmat(X))
g = _sum_abs_axis0(Y)
best_j = np.argmax(g)
g.sort()
g = g[::-1]
S = sign_round_up(Y)
Z = np.asarray(AT_linear_operator.matmat(S))
h = _max_abs_axis1(Z)
# If this algorithm runs for fewer than two iterations,
# then its return values do not have the properties indicated
# in the description of the algorithm.
# In particular, the entries of g are not 1-norms of any
# column of A until the second iteration.
# Therefore we will require the algorithm to run for at least
# two iterations, even though this requirement is not stated
# in the description of the algorithm.
if k >= 2:
if less_than_or_close(max(h), np.dot(Z[:, best_j], X[:, best_j])):
break
ind = np.argsort(h)[::-1][:t]
h = h[ind]
for j in range(t):
X[:, j] = elementary_vector(n, ind[j])
# Check invariant (2.2).
if k >= 2:
if not less_than_or_close(g_prev[0], h_prev[0]):
raise Exception('invariant (2.2) is violated')
if not less_than_or_close(h_prev[0], g[0]):
raise Exception('invariant (2.2) is violated')
# Check invariant (2.3).
if k >= 3:
for j in range(t):
if not less_than_or_close(g[j], g_prev[j]):
raise Exception('invariant (2.3) is violated')
# Update for the next iteration.
g_prev = g
h_prev = h
k += 1
# Return the lower bounds and the corresponding column indices.
return g, ind
def _onenormest_core(A, AT, t, itmax):
"""
Compute a lower bound of the 1-norm of a sparse array.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can produce matrix products.
AT : ndarray or other linear operator
The transpose of A.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
itmax : int, optional
Use at most this many iterations.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse array.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
nmults : int, optional
The number of matrix products that were computed.
nresamples : int, optional
The number of times a parallel column was observed,
necessitating a re-randomization of the column.
Notes
-----
This is algorithm 2.4.
"""
# This function is a more or less direct translation
# of Algorithm 2.4 from the Higham and Tisseur (2000) paper.
A_linear_operator = aslinearoperator(A)
AT_linear_operator = aslinearoperator(AT)
if itmax < 2:
raise ValueError('at least two iterations are required')
if t < 1:
raise ValueError('at least one column is required')
n = A.shape[0]
if t >= n:
raise ValueError('t should be smaller than the order of A')
# Track the number of big*small matrix multiplications
# and the number of resamplings.
nmults = 0
nresamples = 0
# "We now explain our choice of starting matrix. We take the first
# column of X to be the vector of 1s [...] This has the advantage that
# for a matrix with nonnegative elements the algorithm converges
# with an exact estimate on the second iteration, and such matrices
# arise in applications [...]"
X = np.ones((n, t), dtype=float)
# "The remaining columns are chosen as rand{-1,1},
# with a check for and correction of parallel columns,
# exactly as for S in the body of the algorithm."
if t > 1:
for i in range(1, t):
# These are technically initial samples, not resamples,
# so the resampling count is not incremented.
resample_column(i, X)
for i in range(t):
while column_needs_resampling(i, X):
resample_column(i, X)
nresamples += 1
# "Choose starting matrix X with columns of unit 1-norm."
X /= float(n)
# "indices of used unit vectors e_j"
ind_hist = np.zeros(0, dtype=np.intp)
est_old = 0
S = np.zeros((n, t), dtype=float)
k = 1
ind = None
while True:
Y = np.asarray(A_linear_operator.matmat(X))
nmults += 1
mags = _sum_abs_axis0(Y)
est = np.max(mags)
best_j = np.argmax(mags)
if est > est_old or k == 2:
if k >= 2:
ind_best = ind[best_j]
w = Y[:, best_j]
# (1)
if k >= 2 and est <= est_old:
est = est_old
break
est_old = est
S_old = S
if k > itmax:
break
S = sign_round_up(Y)
del Y
# (2)
if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old):
break
if t > 1:
# "Ensure that no column of S is parallel to another column of S
# or to a column of S_old by replacing columns of S by rand{-1,1}."
for i in range(t):
while column_needs_resampling(i, S, S_old):
resample_column(i, S)
nresamples += 1
del S_old
# (3)
Z = np.asarray(AT_linear_operator.matmat(S))
nmults += 1
h = _max_abs_axis1(Z)
del Z
# (4)
if k >= 2 and max(h) == h[ind_best]:
break
# "Sort h so that h_first >= ... >= h_last
# and re-order ind correspondingly."
#
# Later on, we will need at most t+len(ind_hist) largest
# entries, so drop the rest
ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy()
del h
if t > 1:
# (5)
# Break if the most promising t vectors have been visited already.
if np.isin(ind[:t], ind_hist).all():
break
# Put the most promising unvisited vectors at the front of the list
# and put the visited vectors at the end of the list.
# Preserve the order of the indices induced by the ordering of h.
seen = np.isin(ind, ind_hist)
ind = np.concatenate((ind[~seen], ind[seen]))
for j in range(t):
X[:, j] = elementary_vector(n, ind[j])
new_ind = ind[:t][~np.isin(ind[:t], ind_hist)]
ind_hist = np.concatenate((ind_hist, new_ind))
k += 1
v = elementary_vector(n, ind_best)
return est, v, w, nmults, nresamples

View file

@ -0,0 +1,949 @@
import numpy as np
from scipy.sparse.linalg import LinearOperator
from scipy.sparse import kron, eye_array, dia_array
__all__ = ['LaplacianNd']
# Sakurai and Mikota classes are intended for tests and benchmarks
# and explicitly not included in the public API of this module.
class LaplacianNd(LinearOperator):
"""
The grid Laplacian in ``N`` dimensions and its eigenvalues/eigenvectors.
Construct Laplacian on a uniform rectangular grid in `N` dimensions
and output its eigenvalues and eigenvectors.
The Laplacian ``L`` is a square, negative definite, real symmetric array
with signed integer entries on its nonzero diagonals and zeros elsewhere.
Parameters
----------
grid_shape : tuple
A tuple of integers of length ``N`` (corresponding to the dimension of
the Laplacian), where each entry gives the size of that dimension. The
Laplacian matrix is square of the size ``np.prod(grid_shape)``.
boundary_conditions : {'neumann', 'dirichlet', 'periodic'}, optional
The type of the boundary conditions on the boundaries of the grid.
Valid values are ``'dirichlet'``, ``'neumann'`` (default), or
``'periodic'``.
dtype : dtype
Numerical type of the array. Default is ``np.int8``.
Methods
-------
toarray()
Construct a dense array from Laplacian data
tosparse()
Construct a sparse array from Laplacian data
eigenvalues(m=None)
Construct a 1D array of `m` largest (smallest in absolute value)
eigenvalues of the Laplacian matrix in ascending order.
eigenvectors(m=None)
Construct the array with columns made of `m` eigenvectors (``float``)
of the ``Nd`` Laplacian corresponding to the `m` ordered eigenvalues.
.. versionadded:: 1.12.0
Notes
-----
Compared to the MATLAB/Octave implementation [1] of 1-, 2-, and 3-D
Laplacian, this code allows the arbitrary N-D case and the matrix-free
callable option, but is currently limited to pure Dirichlet, Neumann or
periodic boundary conditions only.
The Laplacian matrix of a graph (`scipy.sparse.csgraph.laplacian`) of a
rectangular grid corresponds to the negative Laplacian with the Neumann
conditions, i.e., ``boundary_conditions = 'neumann'``.
All eigenvalues and eigenvectors of the discrete Laplacian operator for
an ``N``-dimensional regular grid of shape `grid_shape` with the grid
step size ``h=1`` are analytically known [2].
References
----------
.. [1] https://github.com/lobpcg/blopex/blob/master/blopex_\
tools/matlab/laplacian/laplacian.m
.. [2] "Eigenvalues and eigenvectors of the second derivative", Wikipedia
https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors_\
of_the_second_derivative
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import LaplacianNd
>>> from scipy.sparse import diags_array, csgraph
>>> from scipy.linalg import eigvalsh
The one-dimensional Laplacian demonstrated below for pure Neumann boundary
conditions on a regular grid with ``n=6`` grid points is exactly the
negative graph Laplacian for the undirected linear graph with ``n``
vertices using the sparse adjacency matrix ``G`` represented by the
famous tri-diagonal matrix:
>>> n = 6
>>> G = diags_array(np.ones(n - 1), offsets=1, format='csr')
>>> Lf = csgraph.laplacian(G, symmetrized=True, form='function')
>>> grid_shape = (n, )
>>> lap = LaplacianNd(grid_shape, boundary_conditions='neumann')
>>> np.array_equal(lap.matmat(np.eye(n)), -Lf(np.eye(n)))
True
Since all matrix entries of the Laplacian are integers, ``'int8'`` is
the default dtype for storing matrix representations.
>>> lap.tosparse()
<DIAgonal sparse array of dtype 'int8'
with 16 stored elements (3 diagonals) and shape (6, 6)>
>>> lap.toarray()
array([[-1, 1, 0, 0, 0, 0],
[ 1, -2, 1, 0, 0, 0],
[ 0, 1, -2, 1, 0, 0],
[ 0, 0, 1, -2, 1, 0],
[ 0, 0, 0, 1, -2, 1],
[ 0, 0, 0, 0, 1, -1]], dtype=int8)
>>> np.array_equal(lap.matmat(np.eye(n)), lap.toarray())
True
>>> np.array_equal(lap.tosparse().toarray(), lap.toarray())
True
Any number of extreme eigenvalues and/or eigenvectors can be computed.
>>> lap = LaplacianNd(grid_shape, boundary_conditions='periodic')
>>> lap.eigenvalues()
array([-4., -3., -3., -1., -1., 0.])
>>> lap.eigenvalues()[-2:]
array([-1., 0.])
>>> lap.eigenvalues(2)
array([-1., 0.])
>>> lap.eigenvectors(1)
array([[0.40824829],
[0.40824829],
[0.40824829],
[0.40824829],
[0.40824829],
[0.40824829]])
>>> lap.eigenvectors(2)
array([[ 0.5 , 0.40824829],
[ 0. , 0.40824829],
[-0.5 , 0.40824829],
[-0.5 , 0.40824829],
[ 0. , 0.40824829],
[ 0.5 , 0.40824829]])
>>> lap.eigenvectors()
array([[ 0.40824829, 0.28867513, 0.28867513, 0.5 , 0.5 ,
0.40824829],
[-0.40824829, -0.57735027, -0.57735027, 0. , 0. ,
0.40824829],
[ 0.40824829, 0.28867513, 0.28867513, -0.5 , -0.5 ,
0.40824829],
[-0.40824829, 0.28867513, 0.28867513, -0.5 , -0.5 ,
0.40824829],
[ 0.40824829, -0.57735027, -0.57735027, 0. , 0. ,
0.40824829],
[-0.40824829, 0.28867513, 0.28867513, 0.5 , 0.5 ,
0.40824829]])
The two-dimensional Laplacian is illustrated on a regular grid with
``grid_shape = (2, 3)`` points in each dimension.
>>> grid_shape = (2, 3)
>>> n = np.prod(grid_shape)
Numeration of grid points is as follows:
>>> np.arange(n).reshape(grid_shape + (-1,))
array([[[0],
[1],
[2]],
<BLANKLINE>
[[3],
[4],
[5]]])
Each of the boundary conditions ``'dirichlet'``, ``'periodic'``, and
``'neumann'`` is illustrated separately; with ``'dirichlet'``
>>> lap = LaplacianNd(grid_shape, boundary_conditions='dirichlet')
>>> lap.tosparse()
<Compressed Sparse Row sparse array of dtype 'int8'
with 20 stored elements and shape (6, 6)>
>>> lap.toarray()
array([[-4, 1, 0, 1, 0, 0],
[ 1, -4, 1, 0, 1, 0],
[ 0, 1, -4, 0, 0, 1],
[ 1, 0, 0, -4, 1, 0],
[ 0, 1, 0, 1, -4, 1],
[ 0, 0, 1, 0, 1, -4]], dtype=int8)
>>> np.array_equal(lap.matmat(np.eye(n)), lap.toarray())
True
>>> np.array_equal(lap.tosparse().toarray(), lap.toarray())
True
>>> lap.eigenvalues()
array([-6.41421356, -5. , -4.41421356, -3.58578644, -3. ,
-1.58578644])
>>> eigvals = eigvalsh(lap.toarray().astype(np.float64))
>>> np.allclose(lap.eigenvalues(), eigvals)
True
>>> np.allclose(lap.toarray() @ lap.eigenvectors(),
... lap.eigenvectors() @ np.diag(lap.eigenvalues()))
True
with ``'periodic'``
>>> lap = LaplacianNd(grid_shape, boundary_conditions='periodic')
>>> lap.tosparse()
<Compressed Sparse Row sparse array of dtype 'int8'
with 24 stored elements and shape (6, 6)>
>>> lap.toarray()
array([[-4, 1, 1, 2, 0, 0],
[ 1, -4, 1, 0, 2, 0],
[ 1, 1, -4, 0, 0, 2],
[ 2, 0, 0, -4, 1, 1],
[ 0, 2, 0, 1, -4, 1],
[ 0, 0, 2, 1, 1, -4]], dtype=int8)
>>> np.array_equal(lap.matmat(np.eye(n)), lap.toarray())
True
>>> np.array_equal(lap.tosparse().toarray(), lap.toarray())
True
>>> lap.eigenvalues()
array([-7., -7., -4., -3., -3., 0.])
>>> eigvals = eigvalsh(lap.toarray().astype(np.float64))
>>> np.allclose(lap.eigenvalues(), eigvals)
True
>>> np.allclose(lap.toarray() @ lap.eigenvectors(),
... lap.eigenvectors() @ np.diag(lap.eigenvalues()))
True
and with ``'neumann'``
>>> lap = LaplacianNd(grid_shape, boundary_conditions='neumann')
>>> lap.tosparse()
<Compressed Sparse Row sparse array of dtype 'int8'
with 20 stored elements and shape (6, 6)>
>>> lap.toarray()
array([[-2, 1, 0, 1, 0, 0],
[ 1, -3, 1, 0, 1, 0],
[ 0, 1, -2, 0, 0, 1],
[ 1, 0, 0, -2, 1, 0],
[ 0, 1, 0, 1, -3, 1],
[ 0, 0, 1, 0, 1, -2]], dtype=int8)
>>> np.array_equal(lap.matmat(np.eye(n)), lap.toarray())
True
>>> np.array_equal(lap.tosparse().toarray(), lap.toarray())
True
>>> lap.eigenvalues()
array([-5., -3., -3., -2., -1., 0.])
>>> eigvals = eigvalsh(lap.toarray().astype(np.float64))
>>> np.allclose(lap.eigenvalues(), eigvals)
True
>>> np.allclose(lap.toarray() @ lap.eigenvectors(),
... lap.eigenvectors() @ np.diag(lap.eigenvalues()))
True
"""
def __init__(self, grid_shape, *,
boundary_conditions='neumann',
dtype=np.int8):
if boundary_conditions not in ('dirichlet', 'neumann', 'periodic'):
raise ValueError(
f"Unknown value {boundary_conditions!r} is given for "
"'boundary_conditions' parameter. The valid options are "
"'dirichlet', 'periodic', and 'neumann' (default)."
)
self.grid_shape = grid_shape
self.boundary_conditions = boundary_conditions
# LaplacianNd folds all dimensions in `grid_shape` into a single one
N = np.prod(grid_shape)
super().__init__(dtype=dtype, shape=(N, N))
def _eigenvalue_ordering(self, m):
"""Compute `m` largest eigenvalues in each of the ``N`` directions,
i.e., up to ``m * N`` total, order them and return `m` largest.
"""
grid_shape = self.grid_shape
if m is None:
indices = np.indices(grid_shape)
Leig = np.zeros(grid_shape)
else:
grid_shape_min = min(grid_shape,
tuple(np.ones_like(grid_shape) * m))
indices = np.indices(grid_shape_min)
Leig = np.zeros(grid_shape_min)
for j, n in zip(indices, grid_shape):
if self.boundary_conditions == 'dirichlet':
Leig += -4 * np.sin(np.pi * (j + 1) / (2 * (n + 1))) ** 2
elif self.boundary_conditions == 'neumann':
Leig += -4 * np.sin(np.pi * j / (2 * n)) ** 2
else: # boundary_conditions == 'periodic'
Leig += -4 * np.sin(np.pi * np.floor((j + 1) / 2) / n) ** 2
Leig_ravel = Leig.ravel()
ind = np.argsort(Leig_ravel)
eigenvalues = Leig_ravel[ind]
if m is not None:
eigenvalues = eigenvalues[-m:]
ind = ind[-m:]
return eigenvalues, ind
def eigenvalues(self, m=None):
"""Return the requested number of eigenvalues.
Parameters
----------
m : int, optional
The positive number of smallest eigenvalues to return.
If not provided, then all eigenvalues will be returned.
Returns
-------
eigenvalues : float array
The requested `m` smallest or all eigenvalues, in ascending order.
"""
eigenvalues, _ = self._eigenvalue_ordering(m)
return eigenvalues
def _ev1d(self, j, n):
"""Return 1 eigenvector in 1d with index `j`
and number of grid points `n` where ``j < n``.
"""
if self.boundary_conditions == 'dirichlet':
i = np.pi * (np.arange(n) + 1) / (n + 1)
ev = np.sqrt(2. / (n + 1.)) * np.sin(i * (j + 1))
elif self.boundary_conditions == 'neumann':
i = np.pi * (np.arange(n) + 0.5) / n
ev = np.sqrt((1. if j == 0 else 2.) / n) * np.cos(i * j)
else: # boundary_conditions == 'periodic'
if j == 0:
ev = np.sqrt(1. / n) * np.ones(n)
elif j + 1 == n and n % 2 == 0:
ev = np.sqrt(1. / n) * np.tile([1, -1], n//2)
else:
i = 2. * np.pi * (np.arange(n) + 0.5) / n
ev = np.sqrt(2. / n) * np.cos(i * np.floor((j + 1) / 2))
# make small values exact zeros correcting round-off errors
# due to symmetry of eigenvectors the exact 0. is correct
ev[np.abs(ev) < np.finfo(np.float64).eps] = 0.
return ev
def _one_eve(self, k):
"""Return 1 eigenvector in Nd with multi-index `j`
as a tensor product of the corresponding 1d eigenvectors.
"""
phi = [self._ev1d(j, n) for j, n in zip(k, self.grid_shape)]
result = phi[0]
for phi_1d in phi[1:]:
result = np.tensordot(result, phi_1d, axes=0)
return np.asarray(result).ravel()
def eigenvectors(self, m=None):
"""Return the requested number of eigenvectors for ordered eigenvalues.
Parameters
----------
m : int, optional
The positive number of eigenvectors to return. If not provided,
then all eigenvectors will be returned.
Returns
-------
eigenvectors : float array
An array with columns made of the requested `m` or all eigenvectors.
The columns are ordered according to the `m` ordered eigenvalues.
"""
_, ind = self._eigenvalue_ordering(m)
if m is None:
grid_shape_min = self.grid_shape
else:
grid_shape_min = min(self.grid_shape,
tuple(np.ones_like(self.grid_shape) * m))
N_indices = np.unravel_index(ind, grid_shape_min)
N_indices = [tuple(x) for x in zip(*N_indices)]
eigenvectors_list = [self._one_eve(k) for k in N_indices]
return np.column_stack(eigenvectors_list)
def toarray(self):
"""
Converts the Laplacian data to a dense array.
Returns
-------
L : ndarray
The shape is ``(N, N)`` where ``N = np.prod(grid_shape)``.
"""
grid_shape = self.grid_shape
n = np.prod(grid_shape)
L = np.zeros([n, n], dtype=np.int8)
# Scratch arrays
L_i = np.empty_like(L)
Ltemp = np.empty_like(L)
for ind, dim in enumerate(grid_shape):
# Start zeroing out L_i
L_i[:] = 0
# Allocate the top left corner with the kernel of L_i
# Einsum returns writable view of arrays
np.einsum("ii->i", L_i[:dim, :dim])[:] = -2
np.einsum("ii->i", L_i[: dim - 1, 1:dim])[:] = 1
np.einsum("ii->i", L_i[1:dim, : dim - 1])[:] = 1
if self.boundary_conditions == 'neumann':
L_i[0, 0] = -1
L_i[dim - 1, dim - 1] = -1
elif self.boundary_conditions == 'periodic':
if dim > 1:
L_i[0, dim - 1] += 1
L_i[dim - 1, 0] += 1
else:
L_i[0, 0] += 1
# kron is too slow for large matrices hence the next two tricks
# 1- kron(eye, mat) is block_diag(mat, mat, ...)
# 2- kron(mat, eye) can be performed by 4d stride trick
# 1-
new_dim = dim
# for block_diag we tile the top left portion on the diagonal
if ind > 0:
tiles = np.prod(grid_shape[:ind])
for j in range(1, tiles):
L_i[j*dim:(j+1)*dim, j*dim:(j+1)*dim] = L_i[:dim, :dim]
new_dim += dim
# 2-
# we need to keep L_i, but reset the array
Ltemp[:new_dim, :new_dim] = L_i[:new_dim, :new_dim]
tiles = int(np.prod(grid_shape[ind+1:]))
# Zero out the top left, the rest is already 0
L_i[:new_dim, :new_dim] = 0
idx = [x for x in range(tiles)]
L_i.reshape(
(new_dim, tiles,
new_dim, tiles)
)[:, idx, :, idx] = Ltemp[:new_dim, :new_dim]
L += L_i
return L.astype(self.dtype)
def tosparse(self):
"""
Constructs a sparse array from the Laplacian data. The returned sparse
array format is dependent on the selected boundary conditions.
Returns
-------
L : scipy.sparse.sparray
The shape is ``(N, N)`` where ``N = np.prod(grid_shape)``.
"""
N = len(self.grid_shape)
p = np.prod(self.grid_shape)
L = dia_array((p, p), dtype=np.int8)
for i in range(N):
dim = self.grid_shape[i]
data = np.ones([3, dim], dtype=np.int8)
data[1, :] *= -2
if self.boundary_conditions == 'neumann':
data[1, 0] = -1
data[1, -1] = -1
L_i = dia_array((data, [-1, 0, 1]), shape=(dim, dim),
dtype=np.int8
)
if self.boundary_conditions == 'periodic':
t = dia_array((dim, dim), dtype=np.int8)
t.setdiag([1], k=-dim+1)
t.setdiag([1], k=dim-1)
L_i += t
for j in range(i):
L_i = kron(eye_array(self.grid_shape[j], dtype=np.int8), L_i)
for j in range(i + 1, N):
L_i = kron(L_i, eye_array(self.grid_shape[j], dtype=np.int8))
L += L_i
return L.astype(self.dtype)
def _matvec(self, x):
grid_shape = self.grid_shape
N = len(grid_shape)
X = x.reshape(grid_shape + (-1,))
Y = -2 * N * X
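# np.roll applies the periodic second-difference stencil along each axis;
# for 'dirichlet' and 'neumann' the wrapped-around boundary contributions
# are subtracted back off below, and 'neumann' additionally adds X back at
# the two boundary slices to weaken the diagonal there from -2 to -1.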
for i in range(N):
Y += np.roll(X, 1, axis=i)
Y += np.roll(X, -1, axis=i)
if self.boundary_conditions in ('neumann', 'dirichlet'):
Y[(slice(None),)*i + (0,) + (slice(None),)*(N-i-1)
] -= np.roll(X, 1, axis=i)[
(slice(None),) * i + (0,) + (slice(None),) * (N-i-1)
]
Y[
(slice(None),) * i + (-1,) + (slice(None),) * (N-i-1)
] -= np.roll(X, -1, axis=i)[
(slice(None),) * i + (-1,) + (slice(None),) * (N-i-1)
]
if self.boundary_conditions == 'neumann':
Y[
(slice(None),) * i + (0,) + (slice(None),) * (N-i-1)
] += np.roll(X, 0, axis=i)[
(slice(None),) * i + (0,) + (slice(None),) * (N-i-1)
]
Y[
(slice(None),) * i + (-1,) + (slice(None),) * (N-i-1)
] += np.roll(X, 0, axis=i)[
(slice(None),) * i + (-1,) + (slice(None),) * (N-i-1)
]
return Y.reshape(-1, X.shape[-1])
def _matmat(self, x):
return self._matvec(x)
def _adjoint(self):
return self
def _transpose(self):
return self
class Sakurai(LinearOperator):
"""
Construct a Sakurai matrix in various formats and its eigenvalues.
Constructs the "Sakurai" matrix motivated by reference [1]_:
square real symmetric positive definite and 5-diagonal
with the main diagonal ``[5, 6, 6, ..., 6, 6, 5]``, the ``+1`` and ``-1``
diagonals filled with ``-4``, and the ``+2`` and ``-2`` diagonals
made of ``1``. Its eigenvalues are analytically known to be
``16. * np.power(np.cos(0.5 * k * np.pi / (n + 1)), 4)``.
The matrix gets ill-conditioned with its size growing.
It is useful for testing and benchmarking sparse eigenvalue solvers
especially those taking advantage of its banded 5-diagonal structure.
See the notes below for details.
Parameters
----------
n : int
The size of the matrix.
dtype : dtype
Numerical type of the array. Default is ``np.int8``.
Methods
-------
toarray()
Construct a dense array from Laplacian data
tosparse()
Construct a sparse array from Laplacian data
tobanded()
The Sakurai matrix in the format for banded symmetric matrices,
i.e., (3, n) ndarray with 3 upper diagonals
placing the main diagonal at the bottom.
eigenvalues
All eigenvalues of the Sakurai matrix ordered ascending.
Notes
-----
Reference [1]_ introduces a generalized eigenproblem for the matrix pair
`A` and `B` where `A` is the identity so we turn it into an eigenproblem
just for the matrix `B` that this function outputs in various formats
together with its eigenvalues.
.. versionadded:: 1.12.0
References
----------
.. [1] T. Sakurai, H. Tadano, Y. Inadomi, and U. Nagashima,
"A moment-based method for large-scale generalized
eigenvalue problems",
Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004).
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg._special_sparse_arrays import Sakurai
>>> from scipy.linalg import eig_banded
>>> n = 6
>>> sak = Sakurai(n)
Since all matrix entries are small integers, ``'int8'`` is
the default dtype for storing matrix representations.
>>> sak.toarray()
array([[ 5, -4, 1, 0, 0, 0],
[-4, 6, -4, 1, 0, 0],
[ 1, -4, 6, -4, 1, 0],
[ 0, 1, -4, 6, -4, 1],
[ 0, 0, 1, -4, 6, -4],
[ 0, 0, 0, 1, -4, 5]], dtype=int8)
>>> sak.tobanded()
array([[ 1, 1, 1, 1, 1, 1],
[-4, -4, -4, -4, -4, -4],
[ 5, 6, 6, 6, 6, 5]], dtype=int8)
>>> sak.tosparse()
<DIAgonal sparse array of dtype 'int8'
with 24 stored elements (5 diagonals) and shape (6, 6)>
>>> np.array_equal(sak.dot(np.eye(n)), sak.tosparse().toarray())
True
>>> sak.eigenvalues()
array([0.03922866, 0.56703972, 2.41789479, 5.97822974,
10.54287655, 14.45473055])
>>> sak.eigenvalues(2)
array([0.03922866, 0.56703972])
The banded form can be used in scipy functions for banded matrices, e.g.,
>>> e = eig_banded(sak.tobanded(), eigvals_only=True)
>>> e = eig_banded(sak.tobanded(), eigvals_only=True)
>>> np.allclose(sak.eigenvalues(), e, atol=n * n * n * np.finfo(float).eps)
True
"""
def __init__(self, n, dtype=np.int8):
self.n = n
self.dtype = dtype
shape = (n, n)
super().__init__(dtype, shape)
def eigenvalues(self, m=None):
"""Return the requested number of eigenvalues.
Parameters
----------
m : int, optional
The positive number of smallest eigenvalues to return.
If not provided, then all eigenvalues will be returned.
Returns
-------
eigenvalues : `np.float64` array
The requested `m` smallest or all eigenvalues, in ascending order.
"""
if m is None:
m = self.n
k = np.arange(self.n + 1 - m, self.n + 1)
return np.flip(16. * np.power(np.cos(0.5 * k * np.pi / (self.n + 1)), 4))
def tobanded(self):
"""
Construct the Sakurai matrix as a banded array.
"""
d0 = np.r_[5, 6 * np.ones(self.n - 2, dtype=self.dtype), 5]
d1 = -4 * np.ones(self.n, dtype=self.dtype)
d2 = np.ones(self.n, dtype=self.dtype)
return np.array([d2, d1, d0]).astype(self.dtype)
def tosparse(self):
"""
Construct the Sakurai matrix in a sparse format.
"""
from scipy.sparse import diags_array
d = self.tobanded()
# the banded format has the main diagonal at the bottom
# `diags_array` inherits dtype from banded
return diags_array([d[0], d[1], d[2], d[1], d[0]], offsets=[-2, -1, 0, 1, 2],
shape=(self.n, self.n), dtype=d.dtype)
def toarray(self):
return self.tosparse().toarray()
def _matvec(self, x):
"""
Construct matrix-free callable banded-matrix-vector multiplication by
the Sakurai matrix without constructing or storing the matrix itself
using the knowledge of its entries and the 5-diagonal format.
"""
x = x.reshape(self.n, -1)
result_dtype = np.promote_types(x.dtype, self.dtype)
sx = np.zeros_like(x, dtype=result_dtype)
sx[0, :] = 5 * x[0, :] - 4 * x[1, :] + x[2, :]
sx[-1, :] = 5 * x[-1, :] - 4 * x[-2, :] + x[-3, :]
sx[1: -1, :] = (6 * x[1: -1, :] - 4 * (x[:-2, :] + x[2:, :])
+ np.pad(x[:-3, :], ((1, 0), (0, 0)))
+ np.pad(x[3:, :], ((0, 1), (0, 0))))
return sx
def _matmat(self, x):
"""
Construct matrix-free callable matrix-matrix multiplication by
the Sakurai matrix without constructing or storing the matrix itself
by reusing the ``_matvec(x)`` that supports both 1D and 2D arrays ``x``.
"""
return self._matvec(x)
def _adjoint(self):
return self
def _transpose(self):
return self
class MikotaM(LinearOperator):
"""
Construct a mass matrix in various formats of Mikota pair.
The mass matrix `M` is square real diagonal
positive definite with entries that are reciprocal to integers.
Parameters
----------
shape : tuple of int
The shape of the matrix.
dtype : dtype
Numerical type of the array. Default is ``np.float64``.
Methods
-------
toarray()
Construct a dense array from Mikota data
tosparse()
Construct a sparse array from Mikota data
tobanded()
The format for banded symmetric matrices,
i.e., (1, n) ndarray with the main diagonal.
"""
def __init__(self, shape, dtype=np.float64):
self.shape = shape
self.dtype = dtype
super().__init__(dtype, shape)
def _diag(self):
# The matrix is constructed from its diagonal 1 / [1, ..., N];
# compute in a function to avoid duplicated code & storage footprint
return (1. / np.arange(1, self.shape[0] + 1)).astype(self.dtype)
def tobanded(self):
return self._diag()
def tosparse(self):
from scipy.sparse import diags_array
return diags_array([self._diag()], offsets=[0],
shape=self.shape, dtype=self.dtype)
def toarray(self):
return np.diag(self._diag()).astype(self.dtype)
def _matvec(self, x):
"""
Construct matrix-free callable banded-matrix-vector multiplication by
the Mikota mass matrix without constructing or storing the matrix itself
using the knowledge of its entries and the diagonal format.
"""
x = x.reshape(self.shape[0], -1)
return self._diag()[:, np.newaxis] * x
def _matmat(self, x):
"""
Construct matrix-free callable matrix-matrix multiplication by
the Mikota mass matrix without constructing or storing the matrix itself
by reusing the ``_matvec(x)`` that supports both 1D and 2D arrays ``x``.
"""
return self._matvec(x)
def _adjoint(self):
return self
def _transpose(self):
return self
class MikotaK(LinearOperator):
"""
Construct a stiffness matrix in various formats of Mikota pair.
The stiffness matrix `K` is square real tri-diagonal symmetric
positive definite with integer entries.
Parameters
----------
shape : tuple of int
The shape of the matrix.
dtype : dtype
Numerical type of the array. Default is ``np.int32``.
Methods
-------
toarray()
Construct a dense array from Mikota data
tosparse()
Construct a sparse array from Mikota data
tobanded()
The format for banded symmetric matrices,
i.e., (2, n) ndarray with 2 upper diagonals
placing the main diagonal at the bottom.
"""
def __init__(self, shape, dtype=np.int32):
self.shape = shape
self.dtype = dtype
super().__init__(dtype, shape)
# The matrix is constructed from its diagonals;
# we precompute these to avoid duplicating the computation
n = shape[0]
self._diag0 = np.arange(2 * n - 1, 0, -2, dtype=self.dtype)
self._diag1 = - np.arange(n - 1, 0, -1, dtype=self.dtype)
def tobanded(self):
return np.array([np.pad(self._diag1, (1, 0), 'constant'), self._diag0])
def tosparse(self):
from scipy.sparse import diags_array
return diags_array([self._diag1, self._diag0, self._diag1], offsets=[-1, 0, 1],
shape=self.shape, dtype=self.dtype)
def toarray(self):
return self.tosparse().toarray()
def _matvec(self, x):
"""
Construct matrix-free callable banded-matrix-vector multiplication by
the Mikota stiffness matrix without constructing or storing the matrix
itself using the knowledge of its entries and the 3-diagonal format.
"""
x = x.reshape(self.shape[0], -1)
result_dtype = np.promote_types(x.dtype, self.dtype)
kx = np.zeros_like(x, dtype=result_dtype)
d1 = self._diag1
d0 = self._diag0
kx[0, :] = d0[0] * x[0, :] + d1[0] * x[1, :]
kx[-1, :] = d1[-1] * x[-2, :] + d0[-1] * x[-1, :]
kx[1: -1, :] = (d1[:-1, None] * x[: -2, :]
+ d0[1: -1, None] * x[1: -1, :]
+ d1[1:, None] * x[2:, :])
return kx
def _matmat(self, x):
"""
Construct matrix-free callable matrix-matrix multiplication by
the Mikota stiffness matrix without constructing or storing the matrix itself
by reusing the ``_matvec(x)`` that supports both 1D and 2D arrays ``x``.
"""
return self._matvec(x)
def _adjoint(self):
return self
def _transpose(self):
return self
class MikotaPair:
"""
Construct the Mikota pair of matrices in various formats and
eigenvalues of the generalized eigenproblem with them.
The Mikota pair of matrices [1]_, [2]_ models a vibration problem
of a linear mass-spring system with the ends attached where
the stiffness of the springs and the masses increase along
the system length such that vibration frequencies are subsequent
integers 1, 2, ..., `n` where `n` is the number of the masses. Thus,
eigenvalues of the generalized eigenvalue problem for
the matrix pair `K` and `M` where `K` is the system stiffness matrix
and `M` is the system mass matrix are the squares of the integers,
i.e., 1, 4, 9, ..., ``n * n``.
The stiffness matrix `K` is square real tri-diagonal symmetric
positive definite. The mass matrix `M` is diagonal with diagonal
entries 1, 1/2, 1/3, ...., ``1/n``. Both matrices get
ill-conditioned with `n` growing.
Parameters
----------
n : int
The size of the matrices of the Mikota pair.
dtype : dtype
Numerical type of the array. Default is ``np.float64``.
Attributes
----------
eigenvalues : 1D ndarray, ``np.uint64``
All eigenvalues of the Mikota pair ordered ascending.
Methods
-------
MikotaK()
A `LinearOperator` custom object for the stiffness matrix.
MikotaM()
A `LinearOperator` custom object for the mass matrix.
.. versionadded:: 1.12.0
References
----------
.. [1] J. Mikota, "Frequency tuning of chain structure multibody oscillators
to place the natural frequencies at omega1 and N-1 integer multiples
omega2,..., omegaN", Z. Angew. Math. Mech. 81 (2001), S2, S201-S202.
.. [2] Peter C. Muller and Metin Gurgoze,
"Natural frequencies of a multi-degree-of-freedom vibration system",
Proc. Appl. Math. Mech. 6, 319-320 (2006).
http://dx.doi.org/10.1002/pamm.200610141.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg._special_sparse_arrays import MikotaPair
>>> n = 6
>>> mik = MikotaPair(n)
>>> mik_k = mik.k
>>> mik_m = mik.m
>>> mik_k.toarray()
array([[11., -5., 0., 0., 0., 0.],
[-5., 9., -4., 0., 0., 0.],
[ 0., -4., 7., -3., 0., 0.],
[ 0., 0., -3., 5., -2., 0.],
[ 0., 0., 0., -2., 3., -1.],
[ 0., 0., 0., 0., -1., 1.]])
>>> mik_k.tobanded()
array([[ 0., -5., -4., -3., -2., -1.],
[11., 9., 7., 5., 3., 1.]])
>>> mik_m.tobanded()
array([1. , 0.5 , 0.33333333, 0.25 , 0.2 ,
0.16666667])
>>> mik_k.tosparse()
<DIAgonal sparse array of dtype 'float64'
with 16 stored elements (3 diagonals) and shape (6, 6)>
>>> mik_m.tosparse()
<DIAgonal sparse array of dtype 'float64'
with 6 stored elements (1 diagonals) and shape (6, 6)>
>>> np.array_equal(mik_k(np.eye(n)), mik_k.toarray())
True
>>> np.array_equal(mik_m(np.eye(n)), mik_m.toarray())
True
>>> mik.eigenvalues()
array([ 1, 4, 9, 16, 25, 36])
>>> mik.eigenvalues(2)
array([ 1, 4])
"""
def __init__(self, n, dtype=np.float64):
self.n = n
self.dtype = dtype
self.shape = (n, n)
self.m = MikotaM(self.shape, self.dtype)
self.k = MikotaK(self.shape, self.dtype)
def eigenvalues(self, m=None):
"""Return the requested number of eigenvalues.
Parameters
----------
m : int, optional
The positive number of smallest eigenvalues to return.
If not provided, then all eigenvalues will be returned.
Returns
-------
eigenvalues : `np.uint64` array
The requested `m` smallest or all eigenvalues, in ascending order.
"""
if m is None:
m = self.n
arange_plus1 = np.arange(1, m + 1, dtype=np.uint64)
return arange_plus1 * arange_plus1

View file

@ -0,0 +1,309 @@
"""
Python wrapper for PROPACK
--------------------------
PROPACK is a collection of Fortran routines for iterative computation
of partial SVDs of large matrices or linear operators.
Based on BSD licensed pypropack project:
http://github.com/jakevdp/pypropack
Author: Jake Vanderplas <vanderplas@astro.washington.edu>
PROPACK source is BSD licensed, and available at
http://soi.stanford.edu/~rmunk/PROPACK/
"""
__all__ = ['_svdp']
import numpy as np
from scipy.sparse.linalg import aslinearoperator
from scipy.linalg import LinAlgError
from ._propack import _spropack # type: ignore[attr-defined]
from ._propack import _dpropack # type: ignore[attr-defined]
from ._propack import _cpropack # type: ignore[attr-defined]
from ._propack import _zpropack # type: ignore[attr-defined]
_lansvd_dict = {
'f': _spropack.slansvd,
'd': _dpropack.dlansvd,
'F': _cpropack.clansvd,
'D': _zpropack.zlansvd,
}
_lansvd_irl_dict = {
'f': _spropack.slansvd_irl,
'd': _dpropack.dlansvd_irl,
'F': _cpropack.clansvd_irl,
'D': _zpropack.zlansvd_irl,
}
_which_converter = {
'LM': 'L',
'SM': 'S',
}
class _AProd:
"""
Wrapper class for linear operator
The call signature of the __call__ method matches the callback of
the PROPACK routines.
"""
def __init__(self, A):
try:
self.A = aslinearoperator(A)
except TypeError:
self.A = aslinearoperator(np.asarray(A))
def __call__(self, transa, m, n, x, y, sparm, iparm):
if transa == 'n':
y[:] = self.A.matvec(x)
else:
y[:] = self.A.rmatvec(x)
@property
def shape(self):
return self.A.shape
@property
def dtype(self):
try:
return self.A.dtype
except AttributeError:
return self.A.matvec(np.zeros(self.A.shape[1])).dtype
def _svdp(A, k, which='LM', irl_mode=True, kmax=None,
compute_u=True, compute_v=True, v0=None, full_output=False, tol=0,
delta=None, eta=None, anorm=0, cgs=False, elr=True,
min_relgap=0.002, shifts=None, maxiter=None, rng=None):
"""
Compute the singular value decomposition of a linear operator using PROPACK
Parameters
----------
A : array_like, sparse matrix, or LinearOperator
Operator for which SVD will be computed. If `A` is a LinearOperator
object, it must define both ``matvec`` and ``rmatvec`` methods.
k : int
Number of singular values/vectors to compute
which : {"LM", "SM"}
Which singular triplets to compute:
- 'LM': compute triplets corresponding to the `k` largest singular
values
- 'SM': compute triplets corresponding to the `k` smallest singular
values
`which='SM'` requires `irl_mode=True`. Computes largest singular
values by default.
irl_mode : bool, optional
If `True`, then compute SVD using IRL (implicitly restarted Lanczos)
mode. Default is `True`.
kmax : int, optional
Maximal number of iterations / maximal dimension of the Krylov
subspace. Default is ``10 * k``.
compute_u : bool, optional
If `True` (default) then compute left singular vectors, `u`.
compute_v : bool, optional
If `True` (default) then compute right singular vectors, `v`.
tol : float, optional
The desired relative accuracy for computed singular values.
If not specified, it will be set based on machine precision.
v0 : array_like, optional
Starting vector for iterations: must be of length ``A.shape[0]``.
If not specified, PROPACK will generate a starting vector.
full_output : bool, optional
If `True`, then return sigma_bound. Default is `False`.
delta : float, optional
Level of orthogonality to maintain between Lanczos vectors.
Default is set based on machine precision.
eta : float, optional
Orthogonality cutoff. During reorthogonalization, vectors with
component larger than `eta` along the Lanczos vector will be purged.
Default is set based on machine precision.
anorm : float, optional
Estimate of ``||A||``. Default is ``0``.
cgs : bool, optional
If `True`, reorthogonalization is done using classical Gram-Schmidt.
If `False` (default), it is done using modified Gram-Schmidt.
elr : bool, optional
If `True` (default), then extended local orthogonality is enforced
when obtaining singular vectors.
min_relgap : float, optional
The smallest relative gap allowed between any shift in IRL mode.
Default is ``0.002``. Accessed only if ``irl_mode=True``.
shifts : int, optional
Number of shifts per restart in IRL mode. Default is determined
to satisfy ``k <= min(kmax-shifts, m, n)``. Must be
>= 0, but choosing 0 might lead to performance degradation.
Accessed only if ``irl_mode=True``.
maxiter : int, optional
Maximum number of restarts in IRL mode. Default is ``1000``.
Accessed only if ``irl_mode=True``.
rng : `numpy.random.Generator`
Pseudorandom number generator state, used to generate the starting
vector when `v0` is not given. A `numpy.random.Generator` instance
must be supplied; passing ``None`` raises a ``ValueError``.
Returns
-------
u : ndarray
The `k` largest (``which="LM"``) or smallest (``which="SM"``) left
singular vectors, ``shape == (A.shape[0], k)``, returned only if
``compute_u=True``.
sigma : ndarray
The top `k` singular values, ``shape == (k,)``
vt : ndarray
The `k` largest (``which="LM"``) or smallest (``which="SM"``) right
singular vectors, ``shape == (k, A.shape[1])``, returned only if
``compute_v=True``.
sigma_bound : ndarray
the error bounds on the singular values sigma, returned only if
``full_output=True``.
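Examples
--------
A minimal illustrative call (hedged: it needs the compiled PROPACK
routines, so the doctest is skipped):
>>> import numpy as np
>>> A = np.diag([3., 2., 1.])
>>> u, sigma, vt, bnd = _svdp(A, k=2, rng=np.random.default_rng(0))  # doctest: +SKIP
>>> sigma  # doctest: +SKIP
array([3., 2.])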
"""
if rng is None:
raise ValueError("`rng` must be a normalized numpy.random.Generator instance")
which = which.upper()
if which not in {'LM', 'SM'}:
raise ValueError("`which` must be either 'LM' or 'SM'")
if not irl_mode and which == 'SM':
raise ValueError("`which`='SM' requires irl_mode=True")
aprod = _AProd(A)
typ = aprod.dtype.char
try:
lansvd_irl = _lansvd_irl_dict[typ]
lansvd = _lansvd_dict[typ]
except KeyError:
# work with non-supported types using native system precision
if np.iscomplexobj(np.empty(0, dtype=typ)):
typ = np.dtype(complex).char
else:
typ = np.dtype(float).char
lansvd_irl = _lansvd_irl_dict[typ]
lansvd = _lansvd_dict[typ]
m, n = aprod.shape
if (k < 1) or (k > min(m, n)):
raise ValueError("k must be positive and not greater than m or n")
if kmax is None:
kmax = 10*k
if maxiter is None:
maxiter = 1000
# guard against unnecessarily large kmax
kmax = min(m + 1, n + 1, kmax)
if kmax < k:
raise ValueError(
"kmax must be greater than or equal to k, "
f"but kmax ({kmax}) < k ({k})")
# convert python args to fortran args
jobu = 'y' if compute_u else 'n'
jobv = 'y' if compute_v else 'n'
# these will be the output arrays
u = np.zeros((m, kmax + 1), order='F', dtype=typ)
v = np.zeros((n, kmax), order='F', dtype=typ)
# Specify the starting vector. If v0 is all zero, PROPACK will generate
# a random starting vector: the random seed cannot be controlled in that
# case, so we'll instead use numpy to generate a random vector
if v0 is None:
u[:, 0] = rng.uniform(size=m)
if np.iscomplexobj(np.empty(0, dtype=typ)): # complex type
u[:, 0] += 1j * rng.uniform(size=m)
else:
try:
u[:, 0] = v0
except ValueError:
raise ValueError(f"v0 must be of length {m}")
# process options for the fit
if delta is None:
delta = np.sqrt(np.finfo(typ).eps)
if eta is None:
eta = np.finfo(typ).eps ** 0.75
if irl_mode:
doption = np.array((delta, eta, anorm, min_relgap), dtype=typ.lower())
# validate or find default shifts
if shifts is None:
shifts = kmax - k
if k > min(kmax - shifts, m, n):
raise ValueError('shifts must satisfy '
'k <= min(kmax-shifts, m, n)!')
elif shifts < 0:
raise ValueError('shifts must be >= 0!')
else:
doption = np.array((delta, eta, anorm), dtype=typ.lower())
ioption = np.array((int(bool(cgs)), int(bool(elr))), dtype='i')
# If computing `u` or `v` (left and right singular vectors,
# respectively), `blocksize` controls how large a fraction of the
# work is done via fast BLAS level 3 operations. A larger blocksize
# may lead to faster computation at the expense of greater memory
# consumption. `blocksize` must be ``>= 1``. We choose 16 here; the
# PROPACK docs do not prescribe a value, but a small power of 2 is a
# conventional choice.
blocksize = 16
# Determine lwork & liwork:
# the required lengths are specified in the PROPACK documentation
if compute_u or compute_v:
lwork = m + n + 9*kmax + 5*kmax*kmax + 4 + max(
3*kmax*kmax + 4*kmax + 4,
blocksize*max(m, n))
liwork = 8*kmax
else:
lwork = m + n + 9*kmax + 2*kmax*kmax + 4 + max(m + n, 4*kmax + 4)
liwork = 2*kmax + 1
work = np.empty(lwork, dtype=typ.lower())
iwork = np.empty(liwork, dtype=np.int32)
# dummy arguments: these are passed to aprod, and not used in this wrapper
dparm = np.empty(1, dtype=typ.lower())
iparm = np.empty(1, dtype=np.int32)
if typ.isupper():
# PROPACK documentation is unclear on the required length of zwork.
# Use the same length Julia's wrapper uses
# see https://github.com/JuliaSmoothOptimizers/PROPACK.jl/
zwork = np.empty(m + n + 32*m, dtype=typ)
works = work, zwork, iwork
else:
works = work, iwork
if irl_mode:
u, sigma, bnd, v, info = lansvd_irl(_which_converter[which], jobu,
jobv, m, n, shifts, k, maxiter,
aprod, u, v, tol, *works, doption,
ioption, dparm, iparm)
else:
u, sigma, bnd, v, info = lansvd(jobu, jobv, m, n, k, aprod, u, v, tol,
*works, doption, ioption, dparm, iparm)
if info > 0:
raise LinAlgError(
f"An invariant subspace of dimension {info} was found.")
elif info < 0:
raise LinAlgError(
f"k={k} singular triplets did not converge within "
f"kmax={kmax} iterations")
# info == 0: The K largest (or smallest) singular triplets were computed
# successfully!
return u[:, :k], sigma, v[:, :k].conj().T, bnd

View file

@ -0,0 +1,22 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse.linalg` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = [ # noqa: F822
'MatrixRankWarning', 'SuperLU', 'factorized',
'spilu', 'splu', 'spsolve',
'spsolve_triangular', 'use_solver', 'test'
]
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="sparse.linalg", module="dsolve",
private_modules=["_dsolve"], all=__all__,
attribute=name)
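# Illustrative behavior (hedged): ``from scipy.sparse.linalg.dsolve import
# splu`` still resolves through this shim via the module-level __getattr__
# (PEP 562), but emits a DeprecationWarning; prefer
# ``from scipy.sparse.linalg import splu``.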

View file

@ -0,0 +1,21 @@
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse.linalg` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
__all__ = [ # noqa: F822
'ArpackError', 'ArpackNoConvergence',
'eigs', 'eigsh', 'lobpcg', 'svds', 'test'
]
def __dir__():
return __all__
def __getattr__(name):
return _sub_module_deprecation(sub_package="sparse.linalg", module="eigen",
private_modules=["_eigen"], all=__all__,
attribute=name)
