up follow book

Tykayn 2025-08-30 18:14:14 +02:00 committed by tykayn
parent b4b4398bb0
commit 3a7a3849ae
12242 changed files with 2564461 additions and 6914 deletions


@@ -0,0 +1,341 @@
"""Test of 1D arithmetic operations"""
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from scipy.sparse import coo_array, csr_array
from scipy.sparse._sputils import isscalarlike
spcreators = [coo_array, csr_array]
math_dtypes = [np.int64, np.float64, np.complex128]
def toarray(a):
if isinstance(a, np.ndarray) or isscalarlike(a):
return a
return a.toarray()
@pytest.fixture
def dat1d():
return np.array([3, 0, 1, 0], 'd')
@pytest.fixture
def datsp_math_dtypes(dat1d):
dat_dtypes = {dtype: dat1d.astype(dtype) for dtype in math_dtypes}
return {
sp: [(dtype, dat, sp(dat)) for dtype, dat in dat_dtypes.items()]
for sp in spcreators
}
@pytest.mark.parametrize("spcreator", spcreators)
class TestArithmetic1D:
def test_empty_arithmetic(self, spcreator):
shape = (5,)
for mytype in [
np.dtype('int32'),
np.dtype('float32'),
np.dtype('float64'),
np.dtype('complex64'),
np.dtype('complex128'),
]:
a = spcreator(shape, dtype=mytype)
b = a + a
c = 2 * a
assert isinstance(a @ a.tocsr(), np.ndarray)
assert isinstance(a @ a.tocoo(), np.ndarray)
for m in [a, b, c]:
assert m @ m == a.toarray() @ a.toarray()
assert m.dtype == mytype
assert toarray(m).dtype == mytype
def test_abs(self, spcreator):
A = np.array([-1, 0, 17, 0, -5, 0, 1, -4, 0, 0, 0, 0], 'd')
assert_equal(abs(A), abs(spcreator(A)).toarray())
def test_round(self, spcreator):
A = np.array([-1.35, 0.56, 17.25, -5.98], 'd')
Asp = spcreator(A)
assert_equal(np.around(A, decimals=1), round(Asp, ndigits=1).toarray())
def test_elementwise_power(self, spcreator):
A = np.array([-4, -3, -2, -1, 0, 1, 2, 3, 4], 'd')
Asp = spcreator(A)
assert_equal(np.power(A, 2), Asp.power(2).toarray())
# element-wise power function needs a scalar power
with pytest.raises(NotImplementedError, match='input is not scalar'):
spcreator(A).power(A)
def test_real(self, spcreator):
D = np.array([1 + 3j, 2 - 4j])
A = spcreator(D)
assert_equal(A.real.toarray(), D.real)
def test_imag(self, spcreator):
D = np.array([1 + 3j, 2 - 4j])
A = spcreator(D)
assert_equal(A.imag.toarray(), D.imag)
def test_mul_scalar(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
assert_equal(dat * 2, (datsp * 2).toarray())
assert_equal(dat * 17.3, (datsp * 17.3).toarray())
def test_rmul_scalar(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
assert_equal(2 * dat, (2 * datsp).toarray())
assert_equal(17.3 * dat, (17.3 * datsp).toarray())
def test_sub(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
if dtype == np.dtype('bool'):
# boolean array subtraction deprecated in 1.9.0
continue
assert_equal((datsp - datsp).toarray(), np.zeros(4))
assert_equal((datsp - 0).toarray(), dat)
A = spcreator([1, -4, 0, 2], dtype='d')
assert_equal((datsp - A).toarray(), dat - A.toarray())
assert_equal((A - datsp).toarray(), A.toarray() - dat)
# test broadcasting
assert_equal(datsp.toarray() - dat[0], dat - dat[0])
def test_add0(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
# Adding 0 to a sparse matrix
assert_equal((datsp + 0).toarray(), dat)
# use sum (which takes 0 as a starting value)
sumS = sum([k * datsp for k in range(1, 3)])
sumD = sum([k * dat for k in range(1, 3)])
assert_allclose(sumS.toarray(), sumD)
def test_elementwise_multiply(self, spcreator):
# real/real
A = np.array([4, 0, 9])
B = np.array([0, 7, -1])
Asp = spcreator(A)
Bsp = spcreator(B)
assert_allclose(Asp.multiply(Bsp).toarray(), A * B) # sparse/sparse
assert_allclose(Asp.multiply(B).toarray(), A * B) # sparse/dense
# complex/complex
C = np.array([1 - 2j, 0 + 5j, -1 + 0j])
D = np.array([5 + 2j, 7 - 3j, -2 + 1j])
Csp = spcreator(C)
Dsp = spcreator(D)
assert_allclose(Csp.multiply(Dsp).toarray(), C * D) # sparse/sparse
assert_allclose(Csp.multiply(D).toarray(), C * D) # sparse/dense
# real/complex
assert_allclose(Asp.multiply(Dsp).toarray(), A * D) # sparse/sparse
assert_allclose(Asp.multiply(D).toarray(), A * D) # sparse/dense
def test_elementwise_multiply_broadcast(self, spcreator):
A = np.array([4])
B = np.array([[-9]])
C = np.array([1, -1, 0])
D = np.array([[7, 9, -9]])
E = np.array([[3], [2], [1]])
F = np.array([[8, 6, 3], [-4, 3, 2], [6, 6, 6]])
G = [1, 2, 3]
H = np.ones((3, 4))
J = H.T
K = np.array([[0]])
L = np.array([[[1, 2], [0, 1]]])
# L is 3-D, which these sparse creators don't support, so it has no
# sparse counterpart below.
Asp = spcreator(A)
Csp = spcreator(C)
Gsp = spcreator(G)
# 2d arrays
Bsp = spcreator(B)
Dsp = spcreator(D)
Esp = spcreator(E)
Fsp = spcreator(F)
Hsp = spcreator(H)
Hspp = spcreator(H[0, None])
Jsp = spcreator(J)
Jspp = spcreator(J[:, 0, None])
Ksp = spcreator(K)
matrices = [A, B, C, D, E, F, G, H, J, K, L]
spmatrices = [Asp, Bsp, Csp, Dsp, Esp, Fsp, Gsp, Hsp, Hspp, Jsp, Jspp, Ksp]
sp1dmatrices = [Asp, Csp, Gsp]
# sparse/sparse
for i in sp1dmatrices:
for j in spmatrices:
try:
dense_mult = i.toarray() * j.toarray()
except ValueError:
with pytest.raises(ValueError, match='inconsistent shapes'):
i.multiply(j)
continue
sp_mult = i.multiply(j)
assert_allclose(sp_mult.toarray(), dense_mult)
# sparse/dense
for i in sp1dmatrices:
for j in matrices:
try:
dense_mult = i.toarray() * j
except TypeError:
continue
except ValueError:
matchme = 'broadcast together|inconsistent shapes'
with pytest.raises(ValueError, match=matchme):
i.multiply(j)
continue
try:
sp_mult = i.multiply(j)
except ValueError:
continue
assert_allclose(toarray(sp_mult), dense_mult)
def test_elementwise_divide(self, spcreator, dat1d):
datsp = spcreator(dat1d)
expected = np.array([1, np.nan, 1, np.nan])
actual = datsp / datsp
# need assert_array_equal to handle nan values
np.testing.assert_array_equal(actual, expected)
denom = spcreator([1, 0, 0, 4], dtype='d')
expected = [3, np.nan, np.inf, 0]
np.testing.assert_array_equal(datsp / denom, expected)
# complex
A = np.array([1 - 2j, 0 + 5j, -1 + 0j])
B = np.array([5 + 2j, 7 - 3j, -2 + 1j])
Asp = spcreator(A)
Bsp = spcreator(B)
assert_allclose(Asp / Bsp, A / B)
# integer
A = np.array([1, 2, 3])
B = np.array([0, 1, 2])
Asp = spcreator(A)
Bsp = spcreator(B)
with np.errstate(divide='ignore'):
assert_equal(Asp / Bsp, A / B)
# mismatching sparsity patterns
A = np.array([0, 1])
B = np.array([1, 0])
Asp = spcreator(A)
Bsp = spcreator(B)
with np.errstate(divide='ignore', invalid='ignore'):
assert_equal(Asp / Bsp, A / B)
def test_pow(self, spcreator):
A = np.array([1, 0, 2, 0])
B = spcreator(A)
# unusual exponents
with pytest.raises(ValueError, match='negative integer powers'):
B**-1
with pytest.raises(NotImplementedError, match='zero power'):
B**0
for exponent in [1, 2, 3, 2.2]:
ret_sp = B**exponent
ret_np = A**exponent
assert_equal(ret_sp.toarray(), ret_np)
assert_equal(ret_sp.dtype, ret_np.dtype)
def test_dot_scalar(self, spcreator, dat1d):
A = spcreator(dat1d)
scalar = 10
actual = A.dot(scalar)
expected = A * scalar
assert_allclose(actual.toarray(), expected.toarray())
def test_matmul(self, spcreator):
Msp = spcreator([2, 0, 3.0])
B = spcreator(np.array([[0, 1], [1, 0], [0, 2]], 'd'))
col = np.array([[1, 2, 3]]).T
# check sparse @ dense 2d column
assert_allclose(Msp @ col, Msp.toarray() @ col)
# check sparse1d @ sparse2d, sparse1d @ dense2d, dense1d @ sparse2d
assert_allclose((Msp @ B).toarray(), Msp.toarray() @ B.toarray())
assert_allclose(Msp.toarray() @ B, (Msp @ B).toarray())
assert_allclose(Msp @ B.toarray(), (Msp @ B).toarray())
# check sparse1d @ dense1d, sparse1d @ sparse1d
V = np.array([0, 0, 1])
assert_allclose(Msp @ V, Msp.toarray() @ V)
Vsp = spcreator(V)
Msp_Vsp = Msp @ Vsp
assert isinstance(Msp_Vsp, np.ndarray)
assert Msp_Vsp.shape == ()
# output is 0-dim ndarray
assert_allclose(np.array(3), Msp_Vsp)
assert_allclose(np.array(3), Msp.toarray() @ Vsp)
assert_allclose(np.array(3), Msp @ Vsp.toarray())
assert_allclose(np.array(3), Msp.toarray() @ Vsp.toarray())
# check error on matrix-scalar
with pytest.raises(ValueError, match='Scalar operands are not allowed'):
Msp @ 1
with pytest.raises(ValueError, match='Scalar operands are not allowed'):
1 @ Msp
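# Added note (not part of the original test): a 1-D @ 1-D product follows
# NumPy semantics and collapses to a 0-dim ndarray rather than a sparse
# object (a scalar has nothing to store sparsely), and scalar operands are
# rejected just as they are by np.matmul.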
def test_sub_dense(self, spcreator, datsp_math_dtypes):
# subtracting a dense matrix to/from a sparse matrix
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
if dtype == np.dtype('bool'):
# boolean array subtraction deprecated in 1.9.0
continue
# Manually add to avoid upcasting from scalar
# multiplication.
sum1 = (dat + dat + dat) - datsp
assert_equal(sum1, dat + dat)
sum2 = (datsp + datsp + datsp) - dat
assert_equal(sum2, dat + dat)
def test_size_zero_matrix_arithmetic(self, spcreator):
# Test basic matrix arithmetic with shapes like 0, (1, 0), (0, 3), etc.
mat = np.array([])
a = mat.reshape(0)
d = mat.reshape((1, 0))
f = np.ones([5, 5])
asp = spcreator(a)
dsp = spcreator(d)
# bad shape for addition
with pytest.raises(ValueError, match='inconsistent shapes'):
asp.__add__(dsp)
# matrix product.
assert_equal(asp.dot(asp), np.dot(a, a))
# bad matrix products
with pytest.raises(ValueError, match='dimension mismatch|shapes.*not aligned'):
asp.dot(f)
# element-wise multiplication
assert_equal(asp.multiply(asp).toarray(), np.multiply(a, a))
assert_equal(asp.multiply(a).toarray(), np.multiply(a, a))
assert_equal(asp.multiply(6).toarray(), np.multiply(a, 6))
# bad element-wise multiplication
with pytest.raises(ValueError, match='inconsistent shapes'):
asp.multiply(f)
# Addition
assert_equal(asp.__add__(asp).toarray(), a.__add__(a))


@@ -0,0 +1,561 @@
import pytest
import numpy as np
import numpy.testing as npt
import scipy.sparse
import scipy.sparse.linalg as spla
sparray_types = ('bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil')
sparray_classes = [
getattr(scipy.sparse, f'{T}_array') for T in sparray_types
]
A = np.array([
[0, 1, 2, 0],
[2, 0, 0, 3],
[1, 4, 0, 0]
])
B = np.array([
[0, 1],
[2, 0]
])
X = np.array([
[1, 0, 0, 1],
[2, 1, 2, 0],
[0, 2, 1, 0],
[0, 0, 1, 2]
], dtype=float)
sparrays = [sparray(A) for sparray in sparray_classes]
square_sparrays = [sparray(B) for sparray in sparray_classes]
eig_sparrays = [sparray(X) for sparray in sparray_classes]
parametrize_sparrays = pytest.mark.parametrize(
"A", sparrays, ids=sparray_types
)
parametrize_square_sparrays = pytest.mark.parametrize(
"B", square_sparrays, ids=sparray_types
)
parametrize_eig_sparrays = pytest.mark.parametrize(
"X", eig_sparrays, ids=sparray_types
)
@parametrize_sparrays
def test_sum(A):
assert not isinstance(A.sum(axis=0), np.matrix), \
"Expected array, got matrix"
assert A.sum(axis=0).shape == (4,)
assert A.sum(axis=1).shape == (3,)
@parametrize_sparrays
def test_mean(A):
assert not isinstance(A.mean(axis=1), np.matrix), \
"Expected array, got matrix"
@parametrize_sparrays
def test_min_max(A):
# Some formats don't support min/max operations, so we skip those here.
if hasattr(A, 'min'):
assert not isinstance(A.min(axis=1), np.matrix), \
"Expected array, got matrix"
if hasattr(A, 'max'):
assert not isinstance(A.max(axis=1), np.matrix), \
"Expected array, got matrix"
if hasattr(A, 'argmin'):
assert not isinstance(A.argmin(axis=1), np.matrix), \
"Expected array, got matrix"
if hasattr(A, 'argmax'):
assert not isinstance(A.argmax(axis=1), np.matrix), \
"Expected array, got matrix"
@parametrize_sparrays
def test_todense(A):
assert not isinstance(A.todense(), np.matrix), \
"Expected array, got matrix"
@parametrize_sparrays
def test_indexing(A):
if A.__class__.__name__[:3] in ('dia', 'coo', 'bsr'):
return
all_res = (
A[1, :],
A[:, 1],
A[1, [1, 2]],
A[[1, 2], 1],
A[[0]],
A[:, [1, 2]],
A[[1, 2], :],
A[1, [[1, 2]]],
A[[[1, 2]], 1],
)
for res in all_res:
assert isinstance(res, scipy.sparse.sparray), \
f"Expected sparse array, got {res._class__.__name__}"
@parametrize_sparrays
def test_dense_addition(A):
X = np.random.random(A.shape)
assert not isinstance(A + X, np.matrix), "Expected array, got matrix"
@parametrize_sparrays
def test_sparse_addition(A):
assert isinstance((A + A), scipy.sparse.sparray), "Expected array, got matrix"
@parametrize_sparrays
def test_elementwise_mul(A):
assert np.all((A * A).todense() == A.power(2).todense())
@parametrize_sparrays
def test_elementwise_rmul(A):
with pytest.raises(TypeError):
None * A
with pytest.raises(ValueError):
np.eye(3) * scipy.sparse.csr_array(np.arange(6).reshape(2, 3))
assert np.all((2 * A) == (A.todense() * 2))
assert np.all((A.todense() * A) == (A.todense() ** 2))
@parametrize_sparrays
def test_matmul(A):
assert np.all((A @ A.T).todense() == A.dot(A.T).todense())
@parametrize_sparrays
def test_power_operator(A):
assert isinstance((A**2), scipy.sparse.sparray), "Expected array, got matrix"
# https://github.com/scipy/scipy/issues/15948
npt.assert_equal((A**2).todense(), (A.todense())**2)
# power of zero would be all ones (dense), so a helpful error message is raised
with pytest.raises(NotImplementedError, match="zero power"):
A**0
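# Added note (not part of the original test): A**0 would equal the dense
# np.ones(A.shape), which a sparse container cannot represent efficiently,
# hence the NotImplementedError with the "zero power" message tested above.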
@parametrize_sparrays
def test_sparse_divide(A):
assert isinstance(A / A, np.ndarray)
@parametrize_sparrays
@pytest.mark.thread_unsafe
def test_sparse_dense_divide(A):
with pytest.warns(RuntimeWarning):
assert isinstance((A / A.todense()), scipy.sparse.sparray)
@parametrize_sparrays
def test_dense_divide(A):
assert isinstance((A / 2), scipy.sparse.sparray), "Expected array, got matrix"
@parametrize_sparrays
def test_no_A_attr(A):
with pytest.raises(AttributeError):
A.A
@parametrize_sparrays
def test_no_H_attr(A):
with pytest.raises(AttributeError):
A.H
@parametrize_sparrays
def test_getrow_getcol(A):
assert isinstance(A._getcol(0), scipy.sparse.sparray)
assert isinstance(A._getrow(0), scipy.sparse.sparray)
# -- linalg --
@parametrize_sparrays
def test_as_linearoperator(A):
L = spla.aslinearoperator(A)
npt.assert_allclose(L * [1, 2, 3, 4], A @ [1, 2, 3, 4])
@parametrize_square_sparrays
def test_inv(B):
if B.__class__.__name__[:3] != 'csc':
return
C = spla.inv(B)
assert isinstance(C, scipy.sparse.sparray)
npt.assert_allclose(C.todense(), np.linalg.inv(B.todense()))
@parametrize_square_sparrays
def test_expm(B):
if B.__class__.__name__[:3] != 'csc':
return
Bmat = scipy.sparse.csc_matrix(B)
C = spla.expm(B)
assert isinstance(C, scipy.sparse.sparray)
npt.assert_allclose(
C.todense(),
spla.expm(Bmat).todense()
)
@parametrize_square_sparrays
def test_expm_multiply(B):
if B.__class__.__name__[:3] != 'csc':
return
npt.assert_allclose(
spla.expm_multiply(B, np.array([1, 2])),
spla.expm(B) @ [1, 2]
)
@parametrize_sparrays
def test_norm(A):
C = spla.norm(A)
npt.assert_allclose(C, np.linalg.norm(A.todense()))
@parametrize_square_sparrays
def test_onenormest(B):
C = spla.onenormest(B)
npt.assert_allclose(C, np.linalg.norm(B.todense(), 1))
@parametrize_square_sparrays
def test_spsolve(B):
if B.__class__.__name__[:3] not in ('csc', 'csr'):
return
npt.assert_allclose(
spla.spsolve(B, [1, 2]),
np.linalg.solve(B.todense(), [1, 2])
)
@pytest.mark.parametrize("fmt",["csr","csc"])
def test_spsolve_triangular(fmt):
arr = [
[1, 0, 0, 0],
[2, 1, 0, 0],
[3, 2, 1, 0],
[4, 3, 2, 1],
]
if fmt == "csr":
X = scipy.sparse.csr_array(arr)
else:
X = scipy.sparse.csc_array(arr)
spla.spsolve_triangular(X, [1, 2, 3, 4])
@parametrize_square_sparrays
def test_factorized(B):
if B.__class__.__name__[:3] != 'csc':
return
LU = spla.factorized(B)
npt.assert_allclose(
LU(np.array([1, 2])),
np.linalg.solve(B.todense(), [1, 2])
)
@parametrize_square_sparrays
@pytest.mark.parametrize(
"solver",
["bicg", "bicgstab", "cg", "cgs", "gmres", "lgmres", "minres", "qmr",
"gcrotmk", "tfqmr"]
)
def test_solvers(B, solver):
if solver == "minres":
kwargs = {}
else:
kwargs = {'atol': 1e-5}
x, info = getattr(spla, solver)(B, np.array([1, 2]), **kwargs)
assert info >= 0 # no errors, even if perhaps did not converge fully
npt.assert_allclose(x, [1, 1], atol=1e-1)
@parametrize_sparrays
@pytest.mark.parametrize(
"solver",
["lsqr", "lsmr"]
)
def test_lstsqr(A, solver):
x, *_ = getattr(spla, solver)(A, [1, 2, 3])
npt.assert_allclose(A @ x, [1, 2, 3])
@parametrize_eig_sparrays
def test_eigs(X):
e, v = spla.eigs(X, k=1)
npt.assert_allclose(
X @ v,
e[0] * v
)
@parametrize_eig_sparrays
def test_eigsh(X):
X = X + X.T
e, v = spla.eigsh(X, k=1)
npt.assert_allclose(
X @ v,
e[0] * v
)
@parametrize_eig_sparrays
def test_svds(X):
u, s, vh = spla.svds(X, k=3)
u2, s2, vh2 = np.linalg.svd(X.todense())
s = np.sort(s)
s2 = np.sort(s2[:3])
npt.assert_allclose(s, s2, atol=1e-3)
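# Added note (not part of the original test): np.linalg.svd returns singular
# values in descending order, so s2[:3] selects the three largest and matches
# the k=3 values computed by svds.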
def test_splu():
X = scipy.sparse.csc_array([
[1, 0, 0, 0],
[2, 1, 0, 0],
[3, 2, 1, 0],
[4, 3, 2, 1],
])
LU = spla.splu(X)
npt.assert_allclose(
LU.solve(np.array([1, 2, 3, 4])),
np.asarray([1, 0, 0, 0], dtype=np.float64),
rtol=1e-14, atol=3e-16
)
def test_spilu():
X = scipy.sparse.csc_array([
[1, 0, 0, 0],
[2, 1, 0, 0],
[3, 2, 1, 0],
[4, 3, 2, 1],
])
LU = spla.spilu(X)
npt.assert_allclose(
LU.solve(np.array([1, 2, 3, 4])),
np.asarray([1, 0, 0, 0], dtype=np.float64),
rtol=1e-14, atol=3e-16
)
@pytest.mark.parametrize(
"cls,indices_attrs",
[
(
scipy.sparse.csr_array,
["indices", "indptr"],
),
(
scipy.sparse.csc_array,
["indices", "indptr"],
),
(
scipy.sparse.coo_array,
["row", "col"],
),
]
)
@pytest.mark.parametrize("expected_dtype", [np.int64, np.int32])
def test_index_dtype_compressed(cls, indices_attrs, expected_dtype):
input_array = scipy.sparse.coo_array(np.arange(9).reshape(3, 3))
coo_tuple = (
input_array.data,
(
input_array.row.astype(expected_dtype),
input_array.col.astype(expected_dtype),
)
)
result = cls(coo_tuple)
for attr in indices_attrs:
assert getattr(result, attr).dtype == expected_dtype
result = cls(coo_tuple, shape=(3, 3))
for attr in indices_attrs:
assert getattr(result, attr).dtype == expected_dtype
if issubclass(cls, scipy.sparse._compressed._cs_matrix):
input_array_csr = input_array.tocsr()
csr_tuple = (
input_array_csr.data,
input_array_csr.indices.astype(expected_dtype),
input_array_csr.indptr.astype(expected_dtype),
)
result = cls(csr_tuple)
for attr in indices_attrs:
assert getattr(result, attr).dtype == expected_dtype
result = cls(csr_tuple, shape=(3, 3))
for attr in indices_attrs:
assert getattr(result, attr).dtype == expected_dtype
def test_default_is_matrix_diags():
m = scipy.sparse.diags([0, 1, 2])
assert not isinstance(m, scipy.sparse.sparray)
def test_default_is_matrix_eye():
m = scipy.sparse.eye(3)
assert not isinstance(m, scipy.sparse.sparray)
def test_default_is_matrix_spdiags():
m = scipy.sparse.spdiags([1, 2, 3], 0, 3, 3)
assert not isinstance(m, scipy.sparse.sparray)
def test_default_is_matrix_identity():
m = scipy.sparse.identity(3)
assert not isinstance(m, scipy.sparse.sparray)
def test_default_is_matrix_kron_dense():
m = scipy.sparse.kron(
np.array([[1, 2], [3, 4]]), np.array([[4, 3], [2, 1]])
)
assert not isinstance(m, scipy.sparse.sparray)
def test_default_is_matrix_kron_sparse():
m = scipy.sparse.kron(
np.array([[1, 2], [3, 4]]), np.array([[1, 0], [0, 0]])
)
assert not isinstance(m, scipy.sparse.sparray)
def test_default_is_matrix_kronsum():
m = scipy.sparse.kronsum(
np.array([[1, 0], [0, 1]]), np.array([[0, 1], [1, 0]])
)
assert not isinstance(m, scipy.sparse.sparray)
def test_default_is_matrix_random():
m = scipy.sparse.random(3, 3)
assert not isinstance(m, scipy.sparse.sparray)
def test_default_is_matrix_rand():
m = scipy.sparse.rand(3, 3)
assert not isinstance(m, scipy.sparse.sparray)
@pytest.mark.parametrize("fn", (scipy.sparse.hstack, scipy.sparse.vstack))
def test_default_is_matrix_stacks(fn):
"""Same idea as `test_default_construction_fn_matrices`, but for the
stacking creation functions."""
A = scipy.sparse.coo_matrix(np.eye(2))
B = scipy.sparse.coo_matrix([[0, 1], [1, 0]])
m = fn([A, B])
assert not isinstance(m, scipy.sparse.sparray)
def test_blocks_default_construction_fn_matrices():
"""Same idea as `test_default_construction_fn_matrices`, but for the block
creation function"""
A = scipy.sparse.coo_matrix(np.eye(2))
B = scipy.sparse.coo_matrix([[2], [0]])
C = scipy.sparse.coo_matrix([[3]])
# block diag
m = scipy.sparse.block_diag((A, B, C))
assert not isinstance(m, scipy.sparse.sparray)
# bmat
m = scipy.sparse.bmat([[A, None], [None, C]])
assert not isinstance(m, scipy.sparse.sparray)
def test_format_property():
for fmt in sparray_types:
arr_cls = getattr(scipy.sparse, f"{fmt}_array")
M = arr_cls([[1, 2]])
assert M.format == fmt
assert M._format == fmt
with pytest.raises(AttributeError):
M.format = "qqq"
def test_issparse():
m = scipy.sparse.eye(3)
a = scipy.sparse.csr_array(m)
assert not isinstance(m, scipy.sparse.sparray)
assert isinstance(a, scipy.sparse.sparray)
# Both sparse arrays and sparse matrices should be sparse
assert scipy.sparse.issparse(a)
assert scipy.sparse.issparse(m)
# ndarray and array_likes are not sparse
assert not scipy.sparse.issparse(a.todense())
assert not scipy.sparse.issparse(m.todense())
def test_isspmatrix():
m = scipy.sparse.eye(3)
a = scipy.sparse.csr_array(m)
assert not isinstance(m, scipy.sparse.sparray)
assert isinstance(a, scipy.sparse.sparray)
# Should only be true for sparse matrices, not sparse arrays
assert not scipy.sparse.isspmatrix(a)
assert scipy.sparse.isspmatrix(m)
# ndarray and array_likes are not sparse
assert not scipy.sparse.isspmatrix(a.todense())
assert not scipy.sparse.isspmatrix(m.todense())
@pytest.mark.parametrize(
("fmt", "fn"),
(
("bsr", scipy.sparse.isspmatrix_bsr),
("coo", scipy.sparse.isspmatrix_coo),
("csc", scipy.sparse.isspmatrix_csc),
("csr", scipy.sparse.isspmatrix_csr),
("dia", scipy.sparse.isspmatrix_dia),
("dok", scipy.sparse.isspmatrix_dok),
("lil", scipy.sparse.isspmatrix_lil),
),
)
def test_isspmatrix_format(fmt, fn):
m = scipy.sparse.eye(3, format=fmt)
a = scipy.sparse.csr_array(m).asformat(fmt)
assert not isinstance(m, scipy.sparse.sparray)
assert isinstance(a, scipy.sparse.sparray)
# Should only be true for sparse matrices, not sparse arrays
assert not fn(a)
assert fn(m)
# ndarray and array_likes are not sparse
assert not fn(a.todense())
assert not fn(m.todense())

File diff suppressed because it is too large


@@ -0,0 +1,447 @@
"""Test of 1D aspects of sparse array classes"""
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from numpy.exceptions import ComplexWarning
from scipy.sparse import (
bsr_array, csc_array, dia_array, lil_array,
coo_array, csr_array, dok_array,
)
from scipy.sparse._sputils import supported_dtypes, matrix
sup_complex = np.testing.suppress_warnings()
sup_complex.filter(ComplexWarning)
spcreators = [coo_array, csr_array, dok_array]
math_dtypes = [np.int64, np.float64, np.complex128]
@pytest.fixture
def dat1d():
return np.array([3, 0, 1, 0], 'd')
@pytest.fixture
def datsp_math_dtypes(dat1d):
dat_dtypes = {dtype: dat1d.astype(dtype) for dtype in math_dtypes}
return {
spcreator: [(dtype, dat, spcreator(dat)) for dtype, dat in dat_dtypes.items()]
for spcreator in spcreators
}
# Test init with 1D dense input
# sparrays which do not plan to support 1D
@pytest.mark.parametrize("spcreator", [bsr_array, csc_array, dia_array, lil_array])
def test_no_1d_support_in_init(spcreator):
with pytest.raises(ValueError, match="arrays don't support 1D input"):
spcreator([0, 1, 2, 3])
# Test init with nD dense input
# sparrays which do not yet support nD
@pytest.mark.parametrize(
"spcreator", [csr_array, dok_array, bsr_array, csc_array, dia_array, lil_array]
)
def test_no_nd_support_in_init(spcreator):
with pytest.raises(ValueError, match="arrays don't.*support 3D"):
spcreator(np.ones((3, 2, 4)))
# Main tests class
@pytest.mark.parametrize("spcreator", spcreators)
class TestCommon1D:
"""test common functionality shared by 1D sparse formats"""
def test_create_empty(self, spcreator):
assert_equal(spcreator((3,)).toarray(), np.zeros(3))
assert_equal(spcreator((3,)).nnz, 0)
assert_equal(spcreator((3,)).count_nonzero(), 0)
def test_invalid_shapes(self, spcreator):
with pytest.raises(ValueError, match='elements cannot be negative'):
spcreator((-3,))
def test_repr(self, spcreator, dat1d):
repr(spcreator(dat1d))
def test_str(self, spcreator, dat1d):
str(spcreator(dat1d))
def test_neg(self, spcreator):
A = np.array([-1, 0, 17, 0, -5, 0, 1, -4, 0, 0, 0, 0], 'd')
assert_equal(-A, (-spcreator(A)).toarray())
def test_1d_supported_init(self, spcreator):
A = spcreator([0, 1, 2, 3])
assert A.ndim == 1
def test_reshape_1d_tofrom_row_or_column(self, spcreator):
# add a dimension 1d->2d
x = spcreator([1, 0, 7, 0, 0, 0, 0, -3, 0, 0, 0, 5])
y = x.reshape(1, 12)
desired = [[1, 0, 7, 0, 0, 0, 0, -3, 0, 0, 0, 5]]
assert_equal(y.toarray(), desired)
# remove a size-1 dimension 2d->1d
x = spcreator(desired)
y = x.reshape(12)
assert_equal(y.toarray(), desired[0])
y2 = x.reshape((12,))
assert y.shape == y2.shape
# make a 2d column into 1d. 2d->1d
y = x.T.reshape(12)
assert_equal(y.toarray(), desired[0])
def test_reshape(self, spcreator):
x = spcreator([1, 0, 7, 0, 0, 0, 0, -3, 0, 0, 0, 5])
y = x.reshape((4, 3))
desired = [[1, 0, 7], [0, 0, 0], [0, -3, 0], [0, 0, 5]]
assert_equal(y.toarray(), desired)
y = x.reshape((12,))
assert y is x
y = x.reshape(12)
assert_equal(y.toarray(), x.toarray())
def test_sum(self, spcreator):
np.random.seed(1234)
dat_1 = np.array([0, 1, 2, 3, -4, 5, -6, 7, 9])
dat_2 = np.random.rand(5)
dat_3 = np.array([])
dat_4 = np.zeros((40,))
arrays = [dat_1, dat_2, dat_3, dat_4]
for dat in arrays:
datsp = spcreator(dat)
with np.errstate(over='ignore'):
assert np.isscalar(datsp.sum())
assert_allclose(dat.sum(), datsp.sum())
assert_allclose(dat.sum(axis=None), datsp.sum(axis=None))
assert_allclose(dat.sum(axis=0), datsp.sum(axis=0))
assert_allclose(dat.sum(axis=-1), datsp.sum(axis=-1))
# test `out` parameter
datsp.sum(axis=0, out=np.zeros(()))
def test_sum_invalid_params(self, spcreator):
out = np.zeros((3,)) # wrong size for out
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
with pytest.raises(ValueError, match='axis out of range'):
datsp.sum(axis=1)
with pytest.raises(ValueError, match='axis out of range'):
datsp.sum(axis=(0, 3))
with pytest.raises(TypeError, match='axis must be an integer'):
datsp.sum(axis=1.5)
with pytest.raises(ValueError, match='output parameter.*wrong.*dimension'):
datsp.sum(axis=0, out=out)
def test_numpy_sum(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
dat_sum = np.sum(dat)
datsp_sum = np.sum(datsp)
assert_allclose(dat_sum, datsp_sum)
def test_mean(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
assert_allclose(dat.mean(), datsp.mean())
assert np.isscalar(datsp.mean(axis=None))
assert_allclose(dat.mean(axis=None), datsp.mean(axis=None))
assert_allclose(dat.mean(axis=0), datsp.mean(axis=0))
assert_allclose(dat.mean(axis=-1), datsp.mean(axis=-1))
with pytest.raises(ValueError, match='axis'):
datsp.mean(axis=1)
with pytest.raises(ValueError, match='axis'):
datsp.mean(axis=-2)
def test_mean_invalid_params(self, spcreator):
out = np.asarray(np.zeros((1, 3)))
dat = np.array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]])
datsp = spcreator(dat)
with pytest.raises(ValueError, match='axis out of range'):
datsp.mean(axis=3)
with pytest.raises(ValueError, match='axis out of range'):
datsp.mean(axis=(0, 3))
with pytest.raises(TypeError, match='axis must be an integer'):
datsp.mean(axis=1.5)
with pytest.raises(ValueError, match='out.*not match shape'):
datsp.mean(axis=1, out=out)
def test_sum_dtype(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
for dtype in supported_dtypes:
dat_sum = dat.sum(dtype=dtype)
datsp_sum = datsp.sum(dtype=dtype)
assert_allclose(dat_sum, datsp_sum)
assert_equal(dat_sum.dtype, datsp_sum.dtype)
def test_mean_dtype(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
for dtype in supported_dtypes:
dat_mean = dat.mean(dtype=dtype)
datsp_mean = datsp.mean(dtype=dtype)
assert_allclose(dat_mean, datsp_mean)
assert_equal(dat_mean.dtype, datsp_mean.dtype)
def test_mean_out(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
dat_out = np.array(0)
datsp_out = np.array(0)
dat.mean(out=dat_out)
datsp.mean(out=datsp_out)
assert_allclose(dat_out, datsp_out)
dat.mean(axis=0, out=dat_out)
datsp.mean(axis=0, out=datsp_out)
assert_allclose(dat_out, datsp_out)
with pytest.raises(ValueError, match="output parameter.*dimension"):
datsp.mean(out=np.array([0]))
with pytest.raises(ValueError, match="output parameter.*dimension"):
datsp.mean(out=np.array([[0]]))
def test_numpy_mean(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
dat_mean = np.mean(dat)
datsp_mean = np.mean(datsp)
assert_allclose(dat_mean, datsp_mean)
assert_equal(dat_mean.dtype, datsp_mean.dtype)
@pytest.mark.thread_unsafe
@sup_complex
def test_from_array(self, spcreator):
A = np.array([2, 3, 4])
assert_equal(spcreator(A).toarray(), A)
A = np.array([1.0 + 3j, 0, -1])
assert_equal(spcreator(A).toarray(), A)
assert_equal(spcreator(A, dtype='int16').toarray(), A.astype('int16'))
@pytest.mark.thread_unsafe
@sup_complex
def test_from_list(self, spcreator):
A = [2, 3, 4]
assert_equal(spcreator(A).toarray(), A)
A = [1.0 + 3j, 0, -1]
assert_equal(spcreator(A).toarray(), np.array(A))
assert_equal(
spcreator(A, dtype='int16').toarray(), np.array(A).astype('int16')
)
@pytest.mark.thread_unsafe
@sup_complex
def test_from_sparse(self, spcreator):
D = np.array([1, 0, 0])
S = coo_array(D)
assert_equal(spcreator(S).toarray(), D)
S = spcreator(D)
assert_equal(spcreator(S).toarray(), D)
D = np.array([1.0 + 3j, 0, -1])
S = coo_array(D)
assert_equal(spcreator(S).toarray(), D)
assert_equal(spcreator(S, dtype='int16').toarray(), D.astype('int16'))
S = spcreator(D)
assert_equal(spcreator(S).toarray(), D)
assert_equal(spcreator(S, dtype='int16').toarray(), D.astype('int16'))
def test_toarray(self, spcreator, dat1d):
datsp = spcreator(dat1d)
# Check C- or F-contiguous (default).
chk = datsp.toarray()
assert_equal(chk, dat1d)
assert chk.flags.c_contiguous == chk.flags.f_contiguous
# Check C-contiguous (with arg).
chk = datsp.toarray(order='C')
assert_equal(chk, dat1d)
assert chk.flags.c_contiguous
assert chk.flags.f_contiguous
# Check F-contiguous (with arg).
chk = datsp.toarray(order='F')
assert_equal(chk, dat1d)
assert chk.flags.c_contiguous
assert chk.flags.f_contiguous
# Check with output arg.
out = np.zeros(datsp.shape, dtype=datsp.dtype)
datsp.toarray(out=out)
assert_equal(out, dat1d)
# Check that things are fine when we don't initialize with zeros.
out[...] = 1.0
datsp.toarray(out=out)
assert_equal(out, dat1d)
# np.dot does not work with sparse matrices (unless scalars)
# so this is testing whether dat1d matches datsp.toarray()
a = np.array([1.0, 2.0, 3.0, 4.0])
dense_dot_dense = np.dot(a, dat1d)
check = np.dot(a, datsp.toarray())
assert_equal(dense_dot_dense, check)
b = np.array([1.0, 2.0, 3.0, 4.0])
dense_dot_dense = np.dot(dat1d, b)
check = np.dot(datsp.toarray(), b)
assert_equal(dense_dot_dense, check)
# Check bool data works.
spbool = spcreator(dat1d, dtype=bool)
arrbool = dat1d.astype(bool)
assert_equal(spbool.toarray(), arrbool)
def test_add(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
a = dat.copy()
a[0] = 2.0
b = datsp
c = b + a
assert_equal(c, b.toarray() + a)
# test broadcasting
# Note: can't add a nonzero scalar to a sparray, but a length-1 array can be added
c = b + a[0:1]
assert_equal(c, b.toarray() + a[0])
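# Illustrative sketch (added, not part of the original test): for a 1-D
# sparse array s == [3, 0, 1, 0],
#   >>> s + 1                  # nonzero scalar: not supported, raises
#   >>> s + np.array([1.0])    # length-1 dense array broadcasts -> [4., 1., 2., 1.]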
def test_radd(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
a = dat.copy()
a[0] = 2.0
b = datsp
c = a + b
assert_equal(c, a + b.toarray())
def test_rsub(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
if dtype == np.dtype('bool'):
# boolean array subtraction deprecated in 1.9.0
continue
assert_equal((dat - datsp), [0, 0, 0, 0])
assert_equal((datsp - dat), [0, 0, 0, 0])
assert_equal((0 - datsp).toarray(), -dat)
A = spcreator([1, -4, 0, 2], dtype='d')
assert_equal((dat - A), dat - A.toarray())
assert_equal((A - dat), A.toarray() - dat)
assert_equal(A.toarray() - datsp, A.toarray() - dat)
assert_equal(datsp - A.toarray(), dat - A.toarray())
# test broadcasting
assert_equal(dat[:1] - datsp, dat[:1] - dat)
def test_matmul_basic(self, spcreator):
A = np.array([[2, 0, 3.0], [0, 0, 0], [0, 1, 2]])
v = np.array([1, 0, 3])
Asp = spcreator(A)
vsp = spcreator(v)
# sparse result when both args are sparse and result not scalar
assert_equal((Asp @ vsp).toarray(), A @ v)
assert_equal(A @ vsp, A @ v)
assert_equal(Asp @ v, A @ v)
assert_equal((vsp @ Asp).toarray(), v @ A)
assert_equal(vsp @ A, v @ A)
assert_equal(v @ Asp, v @ A)
assert_equal(vsp @ vsp, v @ v)
assert_equal(v @ vsp, v @ v)
assert_equal(vsp @ v, v @ v)
assert_equal((Asp @ Asp).toarray(), A @ A)
assert_equal(A @ Asp, A @ A)
assert_equal(Asp @ A, A @ A)
def test_matvec(self, spcreator):
A = np.array([2, 0, 3.0])
Asp = spcreator(A)
col = np.array([[1, 2, 3]]).T
assert_allclose(Asp @ col, Asp.toarray() @ col)
assert (A @ np.array([1, 2, 3])).shape == ()
assert Asp @ np.array([1, 2, 3]) == 11
assert (Asp @ np.array([1, 2, 3])).shape == ()
assert (Asp @ np.array([[1], [2], [3]])).shape == (1,)
# check result type
assert isinstance(Asp @ matrix([[1, 2, 3]]).T, np.ndarray)
# ensure exception is raised for improper dimensions
bad_vecs = [np.array([1, 2]), np.array([1, 2, 3, 4]), np.array([[1], [2]])]
for x in bad_vecs:
with pytest.raises(ValueError, match='dimension mismatch'):
Asp @ x
# The current relationship between sparse matrix products and array
# products is as follows:
dot_result = np.dot(Asp.toarray(), [1, 2, 3])
assert_allclose(Asp @ np.array([1, 2, 3]), dot_result)
assert_allclose(Asp @ [[1], [2], [3]], dot_result.T)
# Note that the result of Asp @ x is dense if x has a singleton dimension.
def test_rmatvec(self, spcreator, dat1d):
M = spcreator(dat1d)
assert_allclose([1, 2, 3, 4] @ M, np.dot([1, 2, 3, 4], M.toarray()))
row = np.array([[1, 2, 3, 4]])
assert_allclose(row @ M, row @ M.toarray())
def test_transpose(self, spcreator, dat1d):
for A in [dat1d, np.array([])]:
B = spcreator(A)
assert_equal(B.toarray(), A)
assert_equal(B.transpose().toarray(), A)
assert_equal(B.dtype, A.dtype)
def test_add_dense_to_sparse(self, spcreator, datsp_math_dtypes):
for dtype, dat, datsp in datsp_math_dtypes[spcreator]:
sum1 = dat + datsp
assert_equal(sum1, dat + dat)
sum2 = datsp + dat
assert_equal(sum2, dat + dat)
def test_iterator(self, spcreator):
# test that __iter__ is compatible with NumPy
B = np.arange(5)
A = spcreator(B)
if A.format not in ['coo', 'dia', 'bsr']:
for x, y in zip(A, B):
assert_equal(x, y)
def test_resize(self, spcreator):
# resize(shape) resizes the matrix in-place
D = np.array([1, 0, 3, 4])
S = spcreator(D)
assert S.resize((3,)) is None
assert_equal(S.toarray(), [1, 0, 3])
S.resize((5,))
assert_equal(S.toarray(), [1, 0, 3, 0, 0])


@@ -0,0 +1,872 @@
"""test sparse matrix construction functions"""
import numpy as np
from numpy import array
from numpy.testing import (assert_equal, assert_,
assert_array_equal, assert_array_almost_equal_nulp)
import pytest
from pytest import raises as assert_raises
from scipy._lib._testutils import check_free_memory
from scipy.sparse import (csr_matrix, coo_matrix,
csr_array, coo_array,
csc_array, bsr_array,
dia_array, dok_array,
lil_array, csc_matrix,
bsr_matrix, dia_matrix,
lil_matrix, sparray, spmatrix,
_construct as construct)
from scipy.sparse._construct import rand as sprand
sparse_formats = ['csr','csc','coo','bsr','dia','lil','dok']
#TODO check whether format=XXX is respected
def _sprandn(m, n, density=0.01, format="coo", dtype=None, rng=None):
# Helper function for testing.
rng = np.random.default_rng(rng)
data_rvs = rng.standard_normal
return construct.random(m, n, density, format, dtype, rng, data_rvs)
def _sprandn_array(m, n, density=0.01, format="coo", dtype=None, rng=None):
# Helper function for testing.
rng = np.random.default_rng(rng)
data_sampler = rng.standard_normal
return construct.random_array((m, n), density=density, format=format, dtype=dtype,
rng=rng, data_sampler=data_sampler)
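# Illustrative usage of the helpers above (added sketch, not part of the
# original diff):
#   >>> m = _sprandn(5, 10, density=0.2, format="csr", rng=1234)
#   >>> m.shape, m.nnz         # ((5, 10), 10); nnz == round(density * 5 * 10)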
class TestConstructUtils:
@pytest.mark.parametrize("cls", [
csc_array, csr_array, coo_array, bsr_array,
dia_array, dok_array, lil_array
])
def test_singleton_array_constructor(self, cls):
with pytest.raises(
ValueError,
match=(
'scipy sparse array classes do not support '
'instantiation from a scalar'
)
):
cls(0)
@pytest.mark.parametrize("cls", [
csc_matrix, csr_matrix, coo_matrix,
bsr_matrix, dia_matrix, lil_matrix
])
def test_singleton_matrix_constructor(self, cls):
"""
This test is for backwards compatibility post scipy 1.13.
The behavior observed here is what is to be expected
with the older matrix classes. The one exception is dok_matrix,
which was not working before scipy 1.12 (unlike the rest of these).
"""
assert cls(0).shape == (1, 1)
def test_spdiags(self):
diags1 = array([[1, 2, 3, 4, 5]])
diags2 = array([[1, 2, 3, 4, 5],
[6, 7, 8, 9,10]])
diags3 = array([[1, 2, 3, 4, 5],
[6, 7, 8, 9,10],
[11,12,13,14,15]])
cases = []
cases.append((diags1, 0, 1, 1, [[1]]))
cases.append((diags1, [0], 1, 1, [[1]]))
cases.append((diags1, [0], 2, 1, [[1],[0]]))
cases.append((diags1, [0], 1, 2, [[1,0]]))
cases.append((diags1, [1], 1, 2, [[0,2]]))
cases.append((diags1,[-1], 1, 2, [[0,0]]))
cases.append((diags1, [0], 2, 2, [[1,0],[0,2]]))
cases.append((diags1,[-1], 2, 2, [[0,0],[1,0]]))
cases.append((diags1, [3], 2, 2, [[0,0],[0,0]]))
cases.append((diags1, [0], 3, 4, [[1,0,0,0],[0,2,0,0],[0,0,3,0]]))
cases.append((diags1, [1], 3, 4, [[0,2,0,0],[0,0,3,0],[0,0,0,4]]))
cases.append((diags1, [2], 3, 5, [[0,0,3,0,0],[0,0,0,4,0],[0,0,0,0,5]]))
cases.append((diags2, [0,2], 3, 3, [[1,0,8],[0,2,0],[0,0,3]]))
cases.append((diags2, [-1,0], 3, 4, [[6,0,0,0],[1,7,0,0],[0,2,8,0]]))
cases.append((diags2, [2,-3], 6, 6, [[0,0,3,0,0,0],
[0,0,0,4,0,0],
[0,0,0,0,5,0],
[6,0,0,0,0,0],
[0,7,0,0,0,0],
[0,0,8,0,0,0]]))
cases.append((diags3, [-1,0,1], 6, 6, [[6,12, 0, 0, 0, 0],
[1, 7,13, 0, 0, 0],
[0, 2, 8,14, 0, 0],
[0, 0, 3, 9,15, 0],
[0, 0, 0, 4,10, 0],
[0, 0, 0, 0, 5, 0]]))
cases.append((diags3, [-4,2,-1], 6, 5, [[0, 0, 8, 0, 0],
[11, 0, 0, 9, 0],
[0,12, 0, 0,10],
[0, 0,13, 0, 0],
[1, 0, 0,14, 0],
[0, 2, 0, 0,15]]))
cases.append((diags3, [-1, 1, 2], len(diags3[0]), len(diags3[0]),
[[0, 7, 13, 0, 0],
[1, 0, 8, 14, 0],
[0, 2, 0, 9, 15],
[0, 0, 3, 0, 10],
[0, 0, 0, 4, 0]]))
for d, o, m, n, result in cases:
if len(d[0]) == m and m == n:
assert_equal(construct.spdiags(d, o).toarray(), result)
assert_equal(construct.spdiags(d, o, m, n).toarray(), result)
assert_equal(construct.spdiags(d, o, (m, n)).toarray(), result)
def test_diags(self):
a = array([1, 2, 3, 4, 5])
b = array([6, 7, 8, 9, 10])
c = array([11, 12, 13, 14, 15])
cases = []
cases.append((a[:1], 0, (1, 1), [[1]]))
cases.append(([a[:1]], [0], (1, 1), [[1]]))
cases.append(([a[:1]], [0], (2, 1), [[1],[0]]))
cases.append(([a[:1]], [0], (1, 2), [[1,0]]))
cases.append(([a[:1]], [1], (1, 2), [[0,1]]))
cases.append(([a[:2]], [0], (2, 2), [[1,0],[0,2]]))
cases.append(([a[:1]],[-1], (2, 2), [[0,0],[1,0]]))
cases.append(([a[:3]], [0], (3, 4), [[1,0,0,0],[0,2,0,0],[0,0,3,0]]))
cases.append(([a[:3]], [1], (3, 4), [[0,1,0,0],[0,0,2,0],[0,0,0,3]]))
cases.append(([a[:1]], [-2], (3, 5), [[0,0,0,0,0],[0,0,0,0,0],[1,0,0,0,0]]))
cases.append(([a[:2]], [-1], (3, 5), [[0,0,0,0,0],[1,0,0,0,0],[0,2,0,0,0]]))
cases.append(([a[:3]], [0], (3, 5), [[1,0,0,0,0],[0,2,0,0,0],[0,0,3,0,0]]))
cases.append(([a[:3]], [1], (3, 5), [[0,1,0,0,0],[0,0,2,0,0],[0,0,0,3,0]]))
cases.append(([a[:3]], [2], (3, 5), [[0,0,1,0,0],[0,0,0,2,0],[0,0,0,0,3]]))
cases.append(([a[:2]], [3], (3, 5), [[0,0,0,1,0],[0,0,0,0,2],[0,0,0,0,0]]))
cases.append(([a[:1]], [4], (3, 5), [[0,0,0,0,1],[0,0,0,0,0],[0,0,0,0,0]]))
cases.append(([a[:1]], [-4], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[1,0,0]]))
cases.append(([a[:2]], [-3], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[1,0,0],[0,2,0]]))
cases.append(([a[:3]], [-2], (5, 3), [[0,0,0],[0,0,0],[1,0,0],[0,2,0],[0,0,3]]))
cases.append(([a[:3]], [-1], (5, 3), [[0,0,0],[1,0,0],[0,2,0],[0,0,3],[0,0,0]]))
cases.append(([a[:3]], [0], (5, 3), [[1,0,0],[0,2,0],[0,0,3],[0,0,0],[0,0,0]]))
cases.append(([a[:2]], [1], (5, 3), [[0,1,0],[0,0,2],[0,0,0],[0,0,0],[0,0,0]]))
cases.append(([a[:1]], [2], (5, 3), [[0,0,1],[0,0,0],[0,0,0],[0,0,0],[0,0,0]]))
cases.append(([a[:3],b[:1]], [0,2], (3, 3), [[1,0,6],[0,2,0],[0,0,3]]))
cases.append(([a[:2],b[:3]], [-1,0], (3, 4), [[6,0,0,0],[1,7,0,0],[0,2,8,0]]))
cases.append(([a[:4],b[:3]], [2,-3], (6, 6), [[0,0,1,0,0,0],
[0,0,0,2,0,0],
[0,0,0,0,3,0],
[6,0,0,0,0,4],
[0,7,0,0,0,0],
[0,0,8,0,0,0]]))
cases.append(([a[:4],b,c[:4]], [-1,0,1], (5, 5), [[6,11, 0, 0, 0],
[1, 7,12, 0, 0],
[0, 2, 8,13, 0],
[0, 0, 3, 9,14],
[0, 0, 0, 4,10]]))
cases.append(([a[:2],b[:3],c], [-4,2,-1], (6, 5), [[0, 0, 6, 0, 0],
[11, 0, 0, 7, 0],
[0,12, 0, 0, 8],
[0, 0,13, 0, 0],
[1, 0, 0,14, 0],
[0, 2, 0, 0,15]]))
# too long arrays are OK
cases.append(([a], [0], (1, 1), [[1]]))
cases.append(([a[:3],b], [0,2], (3, 3), [[1, 0, 6], [0, 2, 0], [0, 0, 3]]))
cases.append((
np.array([[1, 2, 3], [4, 5, 6]]),
[0,-1],
(3, 3),
[[1, 0, 0], [4, 2, 0], [0, 5, 3]]
))
# scalar case: broadcasting
cases.append(([1,-2,1], [1,0,-1], (3, 3), [[-2, 1, 0],
[1, -2, 1],
[0, 1, -2]]))
for d, o, shape, result in cases:
err_msg = f"{d!r} {o!r} {shape!r} {result!r}"
assert_equal(construct.diags(d, offsets=o, shape=shape).toarray(),
result, err_msg=err_msg)
if (shape[0] == shape[1]
and hasattr(d[0], '__len__')
and len(d[0]) <= max(shape)):
# should be able to find the shape automatically
assert_equal(construct.diags(d, offsets=o).toarray(), result,
err_msg=err_msg)
def test_diags_default(self):
a = array([1, 2, 3, 4, 5])
assert_equal(construct.diags(a).toarray(), np.diag(a))
def test_diags_default_bad(self):
a = array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]])
assert_raises(ValueError, construct.diags, a)
def test_diags_bad(self):
a = array([1, 2, 3, 4, 5])
b = array([6, 7, 8, 9, 10])
c = array([11, 12, 13, 14, 15])
cases = []
cases.append(([a[:0]], 0, (1, 1)))
cases.append(([a[:4],b,c[:3]], [-1,0,1], (5, 5)))
cases.append(([a[:2],c,b[:3]], [-4,2,-1], (6, 5)))
cases.append(([a[:2],c,b[:3]], [-4,2,-1], None))
cases.append(([], [-4,2,-1], None))
cases.append(([1], [-5], (4, 4)))
cases.append(([a], 0, None))
for d, o, shape in cases:
assert_raises(ValueError, construct.diags, d, offsets=o, shape=shape)
assert_raises(TypeError, construct.diags, [[None]], offsets=[0])
def test_diags_vs_diag(self):
# Check that
#
# diags([a, b, ...], [i, j, ...]) == diag(a, i) + diag(b, j) + ...
#
rng = np.random.RandomState(1234)
for n_diags in [1, 2, 3, 4, 5, 10]:
n = 1 + n_diags//2 + rng.randint(0, 10)
offsets = np.arange(-n+1, n-1)
rng.shuffle(offsets)
offsets = offsets[:n_diags]
diagonals = [rng.rand(n - abs(q)) for q in offsets]
mat = construct.diags(diagonals, offsets=offsets)
dense_mat = sum([np.diag(x, j) for x, j in zip(diagonals, offsets)])
assert_array_almost_equal_nulp(mat.toarray(), dense_mat)
if len(offsets) == 1:
mat = construct.diags(diagonals[0], offsets=offsets[0])
dense_mat = np.diag(diagonals[0], offsets[0])
assert_array_almost_equal_nulp(mat.toarray(), dense_mat)
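# Worked example of the identity above (added for illustration):
# construct.diags([[1, 1], [2, 2, 2]], offsets=[1, 0]).toarray() gives
#   [[2, 1, 0],
#    [0, 2, 1],
#    [0, 0, 2]]
# which equals np.diag([1, 1], 1) + np.diag([2, 2, 2], 0).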
def test_diags_dtype(self):
x = construct.diags([2.2], offsets=[0], shape=(2, 2), dtype=int)
assert_equal(x.dtype, int)
assert_equal(x.toarray(), [[2, 0], [0, 2]])
def test_diags_one_diagonal(self):
d = list(range(5))
for k in range(-5, 6):
assert_equal(construct.diags(d, offsets=k).toarray(),
construct.diags([d], offsets=[k]).toarray())
def test_diags_empty(self):
x = construct.diags([])
assert_equal(x.shape, (0, 0))
@pytest.mark.parametrize("identity", [construct.identity, construct.eye_array])
def test_identity(self, identity):
assert_equal(identity(1).toarray(), [[1]])
assert_equal(identity(2).toarray(), [[1,0],[0,1]])
I = identity(3, dtype='int8', format='dia')
assert_equal(I.dtype, np.dtype('int8'))
assert_equal(I.format, 'dia')
for fmt in sparse_formats:
I = identity(3, format=fmt)
assert_equal(I.format, fmt)
assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])
@pytest.mark.parametrize("eye", [construct.eye, construct.eye_array])
def test_eye(self, eye):
assert_equal(eye(1,1).toarray(), [[1]])
assert_equal(eye(2,3).toarray(), [[1,0,0],[0,1,0]])
assert_equal(eye(3,2).toarray(), [[1,0],[0,1],[0,0]])
assert_equal(eye(3,3).toarray(), [[1,0,0],[0,1,0],[0,0,1]])
assert_equal(eye(3,3,dtype='int16').dtype, np.dtype('int16'))
for m in [3, 5]:
for n in [3, 5]:
for k in range(-5,6):
# scipy.sparse.eye deviates from np.eye here. np.eye will
# create arrays of all 0's when the diagonal offset is
# greater than the size of the array. For sparse arrays
# this makes less sense, especially as it results in dia
# arrays with negative diagonals. Therefore sp.sparse.eye
# validates that diagonal offsets fall within the shape of
# the array. See gh-18555.
if (k > 0 and k > n) or (k < 0 and abs(k) > m):
with pytest.raises(
ValueError, match="Offset.*out of bounds"
):
eye(m, n, k=k)
else:
assert_equal(
eye(m, n, k=k).toarray(),
np.eye(m, n, k=k)
)
if m == n:
assert_equal(
eye(m, k=k).toarray(),
np.eye(m, n, k=k)
)
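# Added note (not part of the original test): with this validation,
# eye(3, 3, k=5) raises ValueError ("Offset ... out of bounds") instead of
# silently returning an all-zero 3x3 array the way np.eye(3, 3, k=5) does.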
@pytest.mark.parametrize("eye", [construct.eye, construct.eye_array])
def test_eye_one(self, eye):
assert_equal(eye(1).toarray(), [[1]])
assert_equal(eye(2).toarray(), [[1,0],[0,1]])
I = eye(3, dtype='int8', format='dia')
assert_equal(I.dtype, np.dtype('int8'))
assert_equal(I.format, 'dia')
for fmt in sparse_formats:
I = eye(3, format=fmt)
assert_equal(I.format, fmt)
assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]])
def test_eye_array_vs_matrix(self):
assert isinstance(construct.eye_array(3), sparray)
assert not isinstance(construct.eye(3), sparray)
def test_kron(self):
cases = []
cases.append(array([[0]]))
cases.append(array([[-1]]))
cases.append(array([[4]]))
cases.append(array([[10]]))
cases.append(array([[0],[0]]))
cases.append(array([[0,0]]))
cases.append(array([[1,2],[3,4]]))
cases.append(array([[0,2],[5,0]]))
cases.append(array([[0,2,-6],[8,0,14]]))
cases.append(array([[5,4],[0,0],[6,0]]))
cases.append(array([[5,4,4],[1,0,0],[6,0,8]]))
cases.append(array([[0,1,0,2,0,5,8]]))
cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]]))
# test all cases with some formats
for a in cases:
ca = csr_array(a)
for b in cases:
cb = csr_array(b)
expected = np.kron(a, b)
for fmt in sparse_formats[1:4]:
result = construct.kron(ca, cb, format=fmt)
assert_equal(result.format, fmt)
assert_array_equal(result.toarray(), expected)
assert isinstance(result, sparray)
# test one case with all formats
a = cases[-1]
b = cases[-3]
ca = csr_array(a)
cb = csr_array(b)
expected = np.kron(a, b)
for fmt in sparse_formats:
result = construct.kron(ca, cb, format=fmt)
assert_equal(result.format, fmt)
assert_array_equal(result.toarray(), expected)
assert isinstance(result, sparray)
# check that spmatrix returned when both inputs are spmatrix
result = construct.kron(csr_matrix(a), csr_matrix(b), format=fmt)
assert_equal(result.format, fmt)
assert_array_equal(result.toarray(), expected)
assert isinstance(result, spmatrix)
def test_kron_ndim_exceptions(self):
with pytest.raises(ValueError, match='requires 2D input'):
construct.kron([[0], [1]], csr_array([0, 1]))
with pytest.raises(ValueError, match='requires 2D input'):
construct.kron(csr_array([0, 1]), [[0], [1]])
# no exception if sparse arrays are not input (spmatrix inferred)
construct.kron([[0], [1]], [0, 1])
def test_kron_large(self):
n = 2**16
a = construct.diags_array([1], shape=(1, n), offsets=n-1)
b = construct.diags_array([1], shape=(n, 1), offsets=1-n)
construct.kron(a, a)
construct.kron(b, b)
def test_kronsum(self):
cases = []
cases.append(array([[0]]))
cases.append(array([[-1]]))
cases.append(array([[4]]))
cases.append(array([[10]]))
cases.append(array([[1,2],[3,4]]))
cases.append(array([[0,2],[5,0]]))
cases.append(array([[0,2,-6],[8,0,14],[0,3,0]]))
cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]]))
# test all cases with default format
for a in cases:
for b in cases:
result = construct.kronsum(csr_array(a), csr_array(b)).toarray()
expected = (np.kron(np.eye(b.shape[0]), a)
+ np.kron(b, np.eye(a.shape[0])))
assert_array_equal(result, expected)
# check that spmatrix returned when both inputs are spmatrix
result = construct.kronsum(csr_matrix(a), csr_matrix(b)).toarray()
assert_array_equal(result, expected)
def test_kronsum_ndim_exceptions(self):
with pytest.raises(ValueError, match='requires 2D input'):
construct.kronsum([[0], [1]], csr_array([0, 1]))
with pytest.raises(ValueError, match='requires 2D input'):
construct.kronsum(csr_array([0, 1]), [[0], [1]])
# no exception if sparse arrays are not input (spmatrix inferred)
construct.kronsum([[0, 1], [1, 0]], [2])
@pytest.mark.parametrize("coo_cls", [coo_matrix, coo_array])
def test_vstack(self, coo_cls):
A = coo_cls([[1,2],[3,4]])
B = coo_cls([[5,6]])
expected = array([[1, 2],
[3, 4],
[5, 6]])
assert_equal(construct.vstack([A, B]).toarray(), expected)
assert_equal(construct.vstack([A, B], dtype=np.float32).dtype,
np.float32)
assert_equal(construct.vstack([A.todok(), B.todok()]).toarray(), expected)
assert_equal(construct.vstack([A.tocsr(), B.tocsr()]).toarray(),
expected)
result = construct.vstack([A.tocsr(), B.tocsr()],
format="csr", dtype=np.float32)
assert_equal(result.dtype, np.float32)
assert_equal(result.indices.dtype, np.int32)
assert_equal(result.indptr.dtype, np.int32)
assert_equal(construct.vstack([A.tocsc(), B.tocsc()]).toarray(),
expected)
result = construct.vstack([A.tocsc(), B.tocsc()],
format="csc", dtype=np.float32)
assert_equal(result.dtype, np.float32)
assert_equal(result.indices.dtype, np.int32)
assert_equal(result.indptr.dtype, np.int32)
def test_vstack_maintain64bit_idx_dtype(self):
# see gh-20389 v/hstack returns int32 idx_dtype with input int64 idx_dtype
X = csr_array([[1, 0, 0], [0, 1, 0], [0, 1, 0]])
X.indptr = X.indptr.astype(np.int64)
X.indices = X.indices.astype(np.int64)
assert construct.vstack([X, X]).indptr.dtype == np.int64
assert construct.hstack([X, X]).indptr.dtype == np.int64
X = csc_array([[1, 0, 0], [0, 1, 0], [0, 1, 0]])
X.indptr = X.indptr.astype(np.int64)
X.indices = X.indices.astype(np.int64)
assert construct.vstack([X, X]).indptr.dtype == np.int64
assert construct.hstack([X, X]).indptr.dtype == np.int64
X = coo_array([[1, 0, 0], [0, 1, 0], [0, 1, 0]])
X.coords = tuple(co.astype(np.int64) for co in X.coords)
assert construct.vstack([X, X]).coords[0].dtype == np.int64
assert construct.hstack([X, X]).coords[0].dtype == np.int64
def test_vstack_matrix_or_array(self):
A = [[1,2],[3,4]]
B = [[5,6]]
assert isinstance(construct.vstack([coo_array(A), coo_array(B)]), sparray)
assert isinstance(construct.vstack([coo_array(A), coo_matrix(B)]), sparray)
assert isinstance(construct.vstack([coo_matrix(A), coo_array(B)]), sparray)
assert isinstance(construct.vstack([coo_matrix(A), coo_matrix(B)]), spmatrix)
def test_vstack_1d_with_2d(self):
# fixes gh-21064
arr = csr_array([[1, 0, 0], [0, 1, 0]])
arr1d = csr_array([1, 0, 0])
arr1dcoo = coo_array([1, 0, 0])
assert construct.vstack([arr, np.array([0, 0, 0])]).shape == (3, 3)
assert construct.hstack([arr1d, np.array([[0]])]).shape == (1, 4)
assert construct.hstack([arr1d, arr1d]).shape == (1, 6)
assert construct.vstack([arr1d, arr1d]).shape == (2, 3)
# check csr specialty stacking code like _stack_along_minor_axis
assert construct.hstack([arr, arr]).shape == (2, 6)
assert construct.hstack([arr1d, arr1d]).shape == (1, 6)
assert construct.hstack([arr1d, arr1dcoo]).shape == (1, 6)
assert construct.vstack([arr, arr1dcoo]).shape == (3, 3)
assert construct.vstack([arr1d, arr1dcoo]).shape == (2, 3)
with pytest.raises(ValueError, match="incompatible row dimensions"):
construct.hstack([arr, np.array([0, 0])])
with pytest.raises(ValueError, match="incompatible column dimensions"):
construct.vstack([arr, np.array([0, 0])])
@pytest.mark.parametrize("coo_cls", [coo_matrix, coo_array])
def test_hstack(self, coo_cls):
A = coo_cls([[1,2],[3,4]])
B = coo_cls([[5],[6]])
expected = array([[1, 2, 5],
[3, 4, 6]])
assert_equal(construct.hstack([A, B]).toarray(), expected)
assert_equal(construct.hstack([A, B], dtype=np.float32).dtype,
np.float32)
assert_equal(construct.hstack([A.todok(), B.todok()]).toarray(), expected)
assert_equal(construct.hstack([A.tocsc(), B.tocsc()]).toarray(),
expected)
assert_equal(construct.hstack([A.tocsc(), B.tocsc()],
dtype=np.float32).dtype,
np.float32)
assert_equal(construct.hstack([A.tocsr(), B.tocsr()]).toarray(),
expected)
assert_equal(construct.hstack([A.tocsr(), B.tocsr()],
dtype=np.float32).dtype,
np.float32)
def test_hstack_matrix_or_array(self):
A = [[1,2],[3,4]]
B = [[5],[6]]
assert isinstance(construct.hstack([coo_array(A), coo_array(B)]), sparray)
assert isinstance(construct.hstack([coo_array(A), coo_matrix(B)]), sparray)
assert isinstance(construct.hstack([coo_matrix(A), coo_array(B)]), sparray)
assert isinstance(construct.hstack([coo_matrix(A), coo_matrix(B)]), spmatrix)
@pytest.mark.parametrize("block_array", (construct.bmat, construct.block_array))
def test_block_creation(self, block_array):
A = coo_array([[1, 2], [3, 4]])
B = coo_array([[5],[6]])
C = coo_array([[7]])
D = coo_array((0, 0))
expected = array([[1, 2, 5],
[3, 4, 6],
[0, 0, 7]])
assert_equal(block_array([[A, B], [None, C]]).toarray(), expected)
E = csr_array((1, 2), dtype=np.int32)
assert_equal(block_array([[A.tocsr(), B.tocsr()],
[E, C.tocsr()]]).toarray(),
expected)
assert_equal(block_array([[A.tocsc(), B.tocsc()],
[E.tocsc(), C.tocsc()]]).toarray(),
expected)
expected = array([[1, 2, 0],
[3, 4, 0],
[0, 0, 7]])
assert_equal(block_array([[A, None], [None, C]]).toarray(), expected)
assert_equal(block_array([[A.tocsr(), E.T.tocsr()],
[E, C.tocsr()]]).toarray(),
expected)
assert_equal(block_array([[A.tocsc(), E.T.tocsc()],
[E.tocsc(), C.tocsc()]]).toarray(),
expected)
Z = csr_array((1, 1), dtype=np.int32)
expected = array([[0, 5],
[0, 6],
[7, 0]])
assert_equal(block_array([[None, B], [C, None]]).toarray(), expected)
assert_equal(block_array([[E.T.tocsr(), B.tocsr()],
[C.tocsr(), Z]]).toarray(),
expected)
assert_equal(block_array([[E.T.tocsc(), B.tocsc()],
[C.tocsc(), Z.tocsc()]]).toarray(),
expected)
expected = np.empty((0, 0))
assert_equal(block_array([[None, None]]).toarray(), expected)
assert_equal(block_array([[None, D], [D, None]]).toarray(),
expected)
# test bug reported in gh-5976
expected = array([[7]])
assert_equal(block_array([[None, D], [C, None]]).toarray(),
expected)
# test failure cases
with assert_raises(ValueError) as excinfo:
block_array([[A], [B]])
excinfo.match(r'Got blocks\[1,0\]\.shape\[1\] == 1, expected 2')
with assert_raises(ValueError) as excinfo:
block_array([[A.tocsr()], [B.tocsr()]])
excinfo.match(r'incompatible dimensions for axis 1')
with assert_raises(ValueError) as excinfo:
block_array([[A.tocsc()], [B.tocsc()]])
excinfo.match(r'Mismatching dimensions along axis 1: ({1, 2}|{2, 1})')
with assert_raises(ValueError) as excinfo:
block_array([[A, C]])
excinfo.match(r'Got blocks\[0,1\]\.shape\[0\] == 1, expected 2')
with assert_raises(ValueError) as excinfo:
block_array([[A.tocsr(), C.tocsr()]])
excinfo.match(r'Mismatching dimensions along axis 0: ({1, 2}|{2, 1})')
with assert_raises(ValueError) as excinfo:
block_array([[A.tocsc(), C.tocsc()]])
excinfo.match(r'incompatible dimensions for axis 0')
def test_block_return_type(self):
block = construct.block_array
# csr format ensures we hit _compressed_sparse_stack
# shape of F,G ensure we hit _stack_along_minor_axis
# list version ensure we hit the path with neither helper function
Fl, Gl = [[1, 2],[3, 4]], [[7], [5]]
Fm, Gm = csr_matrix(Fl), csr_matrix(Gl)
assert isinstance(block([[None, Fl], [Gl, None]], format="csr"), sparray)
assert isinstance(block([[None, Fm], [Gm, None]], format="csr"), sparray)
assert isinstance(block([[Fm, Gm]], format="csr"), sparray)
def test_bmat_return_type(self):
"""This can be removed after sparse matrix is removed"""
bmat = construct.bmat
# check return type: if any input is a sparse array, the output is a sparse array, else a sparse matrix
Fl, Gl = [[1, 2],[3, 4]], [[7], [5]]
Fm, Gm = csr_matrix(Fl), csr_matrix(Gl)
Fa, Ga = csr_array(Fl), csr_array(Gl)
assert isinstance(bmat([[Fa, Ga]], format="csr"), sparray)
assert isinstance(bmat([[Fm, Gm]], format="csr"), spmatrix)
assert isinstance(bmat([[None, Fa], [Ga, None]], format="csr"), sparray)
assert isinstance(bmat([[None, Fm], [Ga, None]], format="csr"), sparray)
assert isinstance(bmat([[None, Fm], [Gm, None]], format="csr"), spmatrix)
assert isinstance(bmat([[None, Fl], [Gl, None]], format="csr"), spmatrix)
# type returned by _compressed_sparse_stack (all csr)
assert isinstance(bmat([[Ga, Ga]], format="csr"), sparray)
assert isinstance(bmat([[Gm, Ga]], format="csr"), sparray)
assert isinstance(bmat([[Ga, Gm]], format="csr"), sparray)
assert isinstance(bmat([[Gm, Gm]], format="csr"), spmatrix)
# shape is 2x2 so no _stack_along_minor_axis
assert isinstance(bmat([[Fa, Fm]], format="csr"), sparray)
assert isinstance(bmat([[Fm, Fm]], format="csr"), spmatrix)
# type returned by _compressed_sparse_stack (all csc)
assert isinstance(bmat([[Gm.tocsc(), Ga.tocsc()]], format="csc"), sparray)
assert isinstance(bmat([[Gm.tocsc(), Gm.tocsc()]], format="csc"), spmatrix)
# shape is 2x2 so no _stack_along_minor_axis
assert isinstance(bmat([[Fa.tocsc(), Fm.tocsc()]], format="csr"), sparray)
assert isinstance(bmat([[Fm.tocsc(), Fm.tocsc()]], format="csr"), spmatrix)
# type returned when mixed input
assert isinstance(bmat([[Gl, Ga]], format="csr"), sparray)
assert isinstance(bmat([[Gm.tocsc(), Ga]], format="csr"), sparray)
assert isinstance(bmat([[Gm.tocsc(), Gm]], format="csr"), spmatrix)
assert isinstance(bmat([[Gm, Gm]], format="csc"), spmatrix)
@pytest.mark.xslow
@pytest.mark.thread_unsafe
@pytest.mark.xfail_on_32bit("Can't create large array for test")
def test_concatenate_int32_overflow(self):
""" test for indptr overflow when concatenating matrices """
check_free_memory(30000)
n = 33000
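        # 2 * n**2 is ~2.18e9 stored entries, which exceeds the int32 maximum
        # (~2.15e9), so the stacked index arrays must be promoted to int64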
A = csr_array(np.ones((n, n), dtype=bool))
B = A.copy()
C = construct._compressed_sparse_stack((A, B), axis=0,
return_spmatrix=False)
assert_(np.all(np.equal(np.diff(C.indptr), n)))
assert_equal(C.indices.dtype, np.int64)
assert_equal(C.indptr.dtype, np.int64)
def test_block_diag_basic(self):
""" basic test for block_diag """
A = coo_array([[1,2],[3,4]])
B = coo_array([[5],[6]])
C = coo_array([[7]])
expected = array([[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 6, 0],
[0, 0, 0, 7]])
ABC = construct.block_diag((A, B, C))
assert_equal(ABC.toarray(), expected)
assert ABC.coords[0].dtype == np.int32
def test_block_diag_idx_dtype(self):
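        # inputs that already carry int64 coords should yield int64 coords
        # in the block-diagonal result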
X = coo_array([[1, 0, 0], [0, 1, 0], [0, 1, 0]])
X.coords = tuple(co.astype(np.int64) for co in X.coords)
assert construct.block_diag([X, X]).coords[0].dtype == np.int64
def test_block_diag_scalar_1d_args(self):
""" block_diag with scalar and 1d arguments """
        # one 1d list and a scalar
assert_array_equal(construct.block_diag([[2,3], 4]).toarray(),
[[2, 3, 0], [0, 0, 4]])
# 1d sparse arrays
A = coo_array([1,0,3])
B = coo_array([0,4])
assert_array_equal(construct.block_diag([A, B]).toarray(),
[[1, 0, 3, 0, 0], [0, 0, 0, 0, 4]])
def test_block_diag_1(self):
""" block_diag with one matrix """
assert_equal(construct.block_diag([[1, 0]]).toarray(),
array([[1, 0]]))
assert_equal(construct.block_diag([[[1, 0]]]).toarray(),
array([[1, 0]]))
assert_equal(construct.block_diag([[[1], [0]]]).toarray(),
array([[1], [0]]))
        # just one scalar
assert_equal(construct.block_diag([1]).toarray(),
array([[1]]))
def test_block_diag_sparse_arrays(self):
""" block_diag with sparse arrays """
A = coo_array([[1, 2, 3]], shape=(1, 3))
B = coo_array([[4, 5]], shape=(1, 2))
assert_equal(construct.block_diag([A, B]).toarray(),
array([[1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]))
A = coo_array([[1], [2], [3]], shape=(3, 1))
B = coo_array([[4], [5]], shape=(2, 1))
assert_equal(construct.block_diag([A, B]).toarray(),
array([[1, 0], [2, 0], [3, 0], [0, 4], [0, 5]]))
def test_block_diag_return_type(self):
A, B = coo_array([[1, 2, 3]]), coo_matrix([[2, 3, 4]])
assert isinstance(construct.block_diag([A, A]), sparray)
assert isinstance(construct.block_diag([A, B]), sparray)
assert isinstance(construct.block_diag([B, A]), sparray)
assert isinstance(construct.block_diag([B, B]), spmatrix)
def test_random_sampling(self):
# Simple sanity checks for sparse random sampling.
for f in sprand, _sprandn:
for t in [np.float32, np.float64, np.longdouble,
np.int32, np.int64, np.complex64, np.complex128]:
x = f(5, 10, density=0.1, dtype=t)
assert_equal(x.dtype, t)
assert_equal(x.shape, (5, 10))
assert_equal(x.nnz, 5)
x1 = f(5, 10, density=0.1, rng=4321)
assert_equal(x1.dtype, np.float64)
x2 = f(5, 10, density=0.1, rng=np.random.default_rng(4321))
assert_array_equal(x1.data, x2.data)
assert_array_equal(x1.row, x2.row)
assert_array_equal(x1.col, x2.col)
for density in [0.0, 0.1, 0.5, 1.0]:
x = f(5, 10, density=density)
assert_equal(x.nnz, int(density * np.prod(x.shape)))
for fmt in ['coo', 'csc', 'csr', 'lil']:
x = f(5, 10, format=fmt)
assert_equal(x.format, fmt)
assert_raises(ValueError, lambda: f(5, 10, 1.1))
assert_raises(ValueError, lambda: f(5, 10, -0.1))
@pytest.mark.parametrize("rng", [None, 4321, np.random.default_rng(4321)])
def test_rand(self, rng):
# Simple distributional checks for sparse.rand.
x = sprand(10, 20, density=0.5, dtype=np.float64, rng=rng)
assert_(np.all(np.less_equal(0, x.data)))
assert_(np.all(np.less_equal(x.data, 1)))
@pytest.mark.parametrize("rng", [None, 4321, np.random.default_rng(4321)])
def test_randn(self, rng):
# Simple distributional checks for sparse.randn.
# Statistically, some of these should be negative
# and some should be greater than 1.
x = _sprandn(10, 20, density=0.5, dtype=np.float64, rng=rng)
assert_(np.any(np.less(x.data, 0)))
assert_(np.any(np.less(1, x.data)))
x = _sprandn_array(10, 20, density=0.5, dtype=np.float64, rng=rng)
assert_(np.any(np.less(x.data, 0)))
assert_(np.any(np.less(1, x.data)))
def test_random_accept_str_dtype(self):
# anything that np.dtype can convert to a dtype should be accepted
# for the dtype
construct.random(10, 10, dtype='d')
construct.random_array((10, 10), dtype='d')
construct.random_array((10, 10, 10), dtype='d')
construct.random_array((10, 10, 10, 10, 10), dtype='d')
def test_random_array_maintains_array_shape(self):
# preserve use of old random_state during SPEC 7 transition
arr = construct.random_array((0, 4), density=0.3, dtype=int, random_state=0)
assert arr.shape == (0, 4)
arr = construct.random_array((10, 10, 10), density=0.3, dtype=int, rng=0)
assert arr.shape == (10, 10, 10)
arr = construct.random_array((10, 10, 10, 10, 10), density=0.3, dtype=int,
rng=0)
assert arr.shape == (10, 10, 10, 10, 10)
def test_random_array_idx_dtype(self):
A = construct.random_array((10, 10))
assert A.coords[0].dtype == np.int32
def test_random_sparse_matrix_returns_correct_number_of_non_zero_elements(self):
# A 10 x 10 matrix, with density of 12.65%, should have 13 nonzero elements.
# 10 x 10 x 0.1265 = 12.65, which should be rounded up to 13, not 12.
sparse_matrix = construct.random(10, 10, density=0.1265)
        assert_equal(sparse_matrix.count_nonzero(), 13)
# check random_array
sparse_array = construct.random_array((10, 10), density=0.1265)
        assert_equal(sparse_array.count_nonzero(), 13)
assert isinstance(sparse_array, sparray)
# check big size
shape = (2**33, 2**33)
sparse_array = construct.random_array(shape, density=2.7105e-17)
        assert_equal(sparse_array.count_nonzero(), 2000)
# for n-D
# check random_array
sparse_array = construct.random_array((10, 10, 10, 10), density=0.12658)
        assert_equal(sparse_array.count_nonzero(), 1266)
assert isinstance(sparse_array, sparray)
# check big size
shape = (2**33, 2**33, 2**33)
sparse_array = construct.random_array(shape, density=2.7105e-28)
        assert_equal(sparse_array.count_nonzero(), 172)
def test_diags_array():
"""Tests of diags_array that do not rely on diags wrapper."""
diag = np.arange(1, 5)
assert_array_equal(construct.diags_array(diag).toarray(), np.diag(diag))
assert_array_equal(
construct.diags_array(diag, offsets=2).toarray(), np.diag(diag, k=2)
)
assert_array_equal(
construct.diags_array(diag, offsets=2, shape=(4, 4)).toarray(),
np.diag(diag, k=2)[:4, :4]
)
# Offset outside bounds when shape specified
with pytest.raises(ValueError, match=".*out of bounds"):
construct.diags(np.arange(1, 5), 5, shape=(4, 4))

File diff suppressed because it is too large

View file

@@ -0,0 +1,98 @@
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_
from scipy.sparse import csr_matrix, csc_matrix, lil_matrix
import pytest
def test_csc_getrow():
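    # getrow on a CSC matrix should match the dense row slice and be returned
    # as a csr_matrix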
N = 10
np.random.seed(0)
X = np.random.random((N, N))
X[X > 0.7] = 0
Xcsc = csc_matrix(X)
for i in range(N):
arr_row = X[i:i + 1, :]
csc_row = Xcsc.getrow(i)
assert_array_almost_equal(arr_row, csc_row.toarray())
assert_(type(csc_row) is csr_matrix)
def test_csc_getcol():
N = 10
np.random.seed(0)
X = np.random.random((N, N))
X[X > 0.7] = 0
Xcsc = csc_matrix(X)
for i in range(N):
arr_col = X[:, i:i + 1]
csc_col = Xcsc.getcol(i)
assert_array_almost_equal(arr_col, csc_col.toarray())
assert_(type(csc_col) is csc_matrix)
@pytest.mark.parametrize("matrix_input, axis, expected_shape",
[(csc_matrix([[1, 0],
[0, 0],
[0, 2]]),
0, (0, 2)),
(csc_matrix([[1, 0],
[0, 0],
[0, 2]]),
1, (3, 0)),
(csc_matrix([[1, 0],
[0, 0],
[0, 2]]),
'both', (0, 0)),
(csc_matrix([[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 2, 3, 0, 1]]),
0, (0, 6))])
def test_csc_empty_slices(matrix_input, axis, expected_shape):
# see gh-11127 for related discussion
slice_1 = matrix_input.toarray().shape[0] - 1
slice_2 = slice_1
slice_3 = slice_2 - 1
if axis == 0:
actual_shape_1 = matrix_input[slice_1:slice_2, :].toarray().shape
actual_shape_2 = matrix_input[slice_1:slice_3, :].toarray().shape
elif axis == 1:
actual_shape_1 = matrix_input[:, slice_1:slice_2].toarray().shape
actual_shape_2 = matrix_input[:, slice_1:slice_3].toarray().shape
elif axis == 'both':
actual_shape_1 = matrix_input[slice_1:slice_2, slice_1:slice_2].toarray().shape
actual_shape_2 = matrix_input[slice_1:slice_3, slice_1:slice_3].toarray().shape
assert actual_shape_1 == expected_shape
assert actual_shape_1 == actual_shape_2
@pytest.mark.parametrize('ax', (-2, -1, 0, 1, None))
def test_argmax_overflow(ax):
# See gh-13646: Windows integer overflow for large sparse matrices.
dim = (100000, 100000)
A = lil_matrix(dim)
A[-2, -2] = 42
A[-3, -3] = 0.1234
A = csc_matrix(A)
idx = A.argmax(axis=ax)
if ax is None:
# idx is a single flattened index
# that we need to convert to a 2d index pair;
# can't do this with np.unravel_index because
# the dimensions are too large
ii = idx % dim[0]
jj = idx // dim[0]
else:
        # idx is an array of size A.shape[ax];
        # check the max index to make sure we did not
        # encounter any overflow
assert np.count_nonzero(idx) == A.nnz
ii, jj = np.max(idx), np.argmax(idx)
assert A[ii, jj] == A[-2, -2]

View file

@@ -0,0 +1,214 @@
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_, assert_array_equal
from scipy.sparse import csr_matrix, csc_matrix, csr_array, csc_array, hstack
from scipy import sparse
import pytest
def _check_csr_rowslice(i, sl, X, Xcsr):
np_slice = X[i, sl]
csr_slice = Xcsr[i, sl]
assert_array_almost_equal(np_slice, csr_slice.toarray()[0])
assert_(type(csr_slice) is csr_matrix)
def test_csr_rowslice():
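    # row slices of a CSR matrix should match the dense slices and stay CSR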
N = 10
np.random.seed(0)
X = np.random.random((N, N))
X[X > 0.7] = 0
Xcsr = csr_matrix(X)
slices = [slice(None, None, None),
slice(None, None, -1),
slice(1, -2, 2),
slice(-2, 1, -2)]
for i in range(N):
for sl in slices:
_check_csr_rowslice(i, sl, X, Xcsr)
def test_csr_getrow():
N = 10
np.random.seed(0)
X = np.random.random((N, N))
X[X > 0.7] = 0
Xcsr = csr_matrix(X)
for i in range(N):
arr_row = X[i:i + 1, :]
csr_row = Xcsr.getrow(i)
assert_array_almost_equal(arr_row, csr_row.toarray())
assert_(type(csr_row) is csr_matrix)
def test_csr_getcol():
N = 10
np.random.seed(0)
X = np.random.random((N, N))
X[X > 0.7] = 0
Xcsr = csr_matrix(X)
for i in range(N):
arr_col = X[:, i:i + 1]
csr_col = Xcsr.getcol(i)
assert_array_almost_equal(arr_col, csr_col.toarray())
assert_(type(csr_col) is csr_matrix)
@pytest.mark.parametrize("matrix_input, axis, expected_shape",
[(csr_matrix([[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 2, 3, 0]]),
0, (0, 4)),
(csr_matrix([[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 2, 3, 0]]),
1, (3, 0)),
(csr_matrix([[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 2, 3, 0]]),
'both', (0, 0)),
(csr_matrix([[0, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 2, 3, 0]]),
0, (0, 5))])
def test_csr_empty_slices(matrix_input, axis, expected_shape):
# see gh-11127 for related discussion
slice_1 = matrix_input.toarray().shape[0] - 1
slice_2 = slice_1
slice_3 = slice_2 - 1
if axis == 0:
actual_shape_1 = matrix_input[slice_1:slice_2, :].toarray().shape
actual_shape_2 = matrix_input[slice_1:slice_3, :].toarray().shape
elif axis == 1:
actual_shape_1 = matrix_input[:, slice_1:slice_2].toarray().shape
actual_shape_2 = matrix_input[:, slice_1:slice_3].toarray().shape
elif axis == 'both':
actual_shape_1 = matrix_input[slice_1:slice_2, slice_1:slice_2].toarray().shape
actual_shape_2 = matrix_input[slice_1:slice_3, slice_1:slice_3].toarray().shape
assert actual_shape_1 == expected_shape
assert actual_shape_1 == actual_shape_2
def test_csr_bool_indexing():
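    # boolean list indices and the equivalent boolean ndarrays should select
    # the same entries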
data = csr_matrix([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
list_indices1 = [False, True, False]
array_indices1 = np.array(list_indices1)
list_indices2 = [[False, True, False], [False, True, False], [False, True, False]]
array_indices2 = np.array(list_indices2)
list_indices3 = ([False, True, False], [False, True, False])
array_indices3 = (np.array(list_indices3[0]), np.array(list_indices3[1]))
slice_list1 = data[list_indices1].toarray()
slice_array1 = data[array_indices1].toarray()
slice_list2 = data[list_indices2]
slice_array2 = data[array_indices2]
slice_list3 = data[list_indices3]
slice_array3 = data[array_indices3]
assert (slice_list1 == slice_array1).all()
assert (slice_list2 == slice_array2).all()
assert (slice_list3 == slice_array3).all()
def test_csr_hstack_int64():
"""
    Tests that hstack properly promotes the indices and indptr arrays to
    np.int64 when using np.int32 during concatenation would cause either
    array to overflow.
"""
max_int32 = np.iinfo(np.int32).max
# First case: indices would overflow with int32
data = [1.0]
row = [0]
max_indices_1 = max_int32 - 1
max_indices_2 = 3
# Individual indices arrays are representable with int32
col_1 = [max_indices_1 - 1]
col_2 = [max_indices_2 - 1]
X_1 = csr_matrix((data, (row, col_1)))
X_2 = csr_matrix((data, (row, col_2)))
assert max(max_indices_1 - 1, max_indices_2 - 1) < max_int32
assert X_1.indices.dtype == X_1.indptr.dtype == np.int32
assert X_2.indices.dtype == X_2.indptr.dtype == np.int32
# ... but when concatenating their CSR matrices, the resulting indices
# array can't be represented with int32 and must be promoted to int64.
X_hs = hstack([X_1, X_2], format="csr")
assert X_hs.indices.max() == max_indices_1 + max_indices_2 - 1
assert max_indices_1 + max_indices_2 - 1 > max_int32
assert X_hs.indices.dtype == X_hs.indptr.dtype == np.int64
# Even if the matrices are empty, we must account for their size
# contribution so that we may safely set the final elements.
X_1_empty = csr_matrix(X_1.shape)
X_2_empty = csr_matrix(X_2.shape)
X_hs_empty = hstack([X_1_empty, X_2_empty], format="csr")
assert X_hs_empty.shape == X_hs.shape
assert X_hs_empty.indices.dtype == np.int64
# Should be just small enough to stay in int32 after stack. Note that
# we theoretically could support indices.max() == max_int32, but due to an
# edge-case in the underlying sparsetools code
# (namely the `coo_tocsr` routine),
# we require that max(X_hs_32.shape) < max_int32 as well.
# Hence we can only support max_int32 - 1.
col_3 = [max_int32 - max_indices_1 - 1]
X_3 = csr_matrix((data, (row, col_3)))
X_hs_32 = hstack([X_1, X_3], format="csr")
assert X_hs_32.indices.dtype == np.int32
assert X_hs_32.indices.max() == max_int32 - 1
@pytest.mark.parametrize("cls", [csr_matrix, csr_array, csc_matrix, csc_array])
def test_mixed_index_dtype_int_indexing(cls):
# https://github.com/scipy/scipy/issues/20182
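    # mixing int32 and int64 indptr/indices dtypes should not change the
    # result of fancy indexing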
rng = np.random.default_rng(0)
base_mtx = cls(sparse.random(50, 50, random_state=rng, density=0.1))
indptr_64bit = base_mtx.copy()
indices_64bit = base_mtx.copy()
indptr_64bit.indptr = base_mtx.indptr.astype(np.int64)
indices_64bit.indices = base_mtx.indices.astype(np.int64)
for mtx in [base_mtx, indptr_64bit, indices_64bit]:
np.testing.assert_array_equal(
mtx[[1,2], :].toarray(),
base_mtx[[1, 2], :].toarray()
)
np.testing.assert_array_equal(
mtx[:, [1, 2]].toarray(),
base_mtx[:, [1, 2]].toarray()
)
def test_broadcast_to():
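    # the private _broadcast_to helper should mirror np.broadcast_to for
    # compatible shapes and reject incompatible ones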
a = np.array([1, 0, 2])
b = np.array([3])
e = np.zeros((0,))
res_a = csr_array(a)._broadcast_to((2,3))
res_b = csr_array(b)._broadcast_to((4,))
res_c = csr_array(b)._broadcast_to((2,4))
res_d = csr_array(b)._broadcast_to((1,))
res_e = csr_array(e)._broadcast_to((4,0))
assert_array_equal(res_a.toarray(), np.broadcast_to(a, (2,3)))
assert_array_equal(res_b.toarray(), np.broadcast_to(b, (4,)))
assert_array_equal(res_c.toarray(), np.broadcast_to(b, (2,4)))
assert_array_equal(res_d.toarray(), np.broadcast_to(b, (1,)))
assert_array_equal(res_e.toarray(), np.broadcast_to(e, (4,0)))
with pytest.raises(ValueError, match="cannot be broadcast"):
csr_matrix([[1, 2, 0], [3, 0, 1]])._broadcast_to(shape=(2, 1))
with pytest.raises(ValueError, match="cannot be broadcast"):
csr_matrix([[0, 1, 2]])._broadcast_to(shape=(3, 2))
with pytest.raises(ValueError, match="cannot be broadcast"):
csr_array([0, 1, 2])._broadcast_to(shape=(3, 2))

View file

@@ -0,0 +1,209 @@
import pytest
import numpy as np
from numpy.testing import assert_equal
import scipy as sp
from scipy.sparse import dok_array, dok_matrix
pytestmark = pytest.mark.thread_unsafe
@pytest.fixture
def d():
return {(0, 1): 1, (0, 2): 2}
@pytest.fixture
def A():
return np.array([[0, 1, 2], [0, 0, 0], [0, 0, 0]])
@pytest.fixture(params=[dok_array, dok_matrix])
def Asp(request):
A = request.param((3, 3))
A[(0, 1)] = 1
A[(0, 2)] = 2
yield A
# Note: __iter__ and comparison dunders act like ndarrays for DOK, not dict.
# Dunders reversed, or, ror, ior work as dict for dok_matrix, raise for dok_array
# All other dict methods on DOK format act like dict methods (with extra checks).
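# For example (illustrative sketch only, not executed by the test suite):
#     A = dok_array((2, 2)); A[0, 1] = 1
#     sorted(A.keys())     # dict-style access: [(0, 1)]
#     (A == A).toarray()   # comparison dunders behave like ndarrays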
# Start of tests
################
def test_dict_methods_covered(d, Asp):
d_methods = set(dir(d)) - {"__class_getitem__"}
asp_methods = set(dir(Asp))
assert d_methods < asp_methods
def test_clear(d, Asp):
assert d.items() == Asp.items()
d.clear()
Asp.clear()
assert d.items() == Asp.items()
def test_copy(d, Asp):
assert d.items() == Asp.items()
dd = d.copy()
asp = Asp.copy()
assert dd.items() == asp.items()
assert asp.items() == Asp.items()
asp[(0, 1)] = 3
assert Asp[(0, 1)] == 1
def test_fromkeys_default():
# test with default value
edges = [(0, 2), (1, 0), (2, 1)]
Xdok = dok_array.fromkeys(edges)
X = [[0, 0, 1], [1, 0, 0], [0, 1, 0]]
assert_equal(Xdok.toarray(), X)
def test_fromkeys_positional():
# test with positional value
edges = [(0, 2), (1, 0), (2, 1)]
Xdok = dok_array.fromkeys(edges, -1)
X = [[0, 0, -1], [-1, 0, 0], [0, -1, 0]]
assert_equal(Xdok.toarray(), X)
def test_fromkeys_iterator():
it = ((a, a % 2) for a in range(4))
Xdok = dok_array.fromkeys(it)
X = [[1, 0], [0, 1], [1, 0], [0, 1]]
assert_equal(Xdok.toarray(), X)
def test_get(d, Asp):
assert Asp.get((0, 1)) == d.get((0, 1))
assert Asp.get((0, 0), 99) == d.get((0, 0), 99)
with pytest.raises(IndexError, match="out of bounds"):
Asp.get((0, 4), 99)
def test_items(d, Asp):
assert Asp.items() == d.items()
def test_keys(d, Asp):
assert Asp.keys() == d.keys()
def test_pop(d, Asp):
assert d.pop((0, 1)) == 1
assert Asp.pop((0, 1)) == 1
assert d.items() == Asp.items()
assert Asp.pop((22, 21), None) is None
assert Asp.pop((22, 21), "other") == "other"
with pytest.raises(KeyError, match="(22, 21)"):
Asp.pop((22, 21))
with pytest.raises(TypeError, match="got an unexpected keyword argument"):
Asp.pop((22, 21), default=5)
def test_popitem(d, Asp):
assert d.popitem() == Asp.popitem()
assert d.items() == Asp.items()
def test_setdefault(d, Asp):
assert Asp.setdefault((0, 1), 4) == 1
assert Asp.setdefault((2, 2), 4) == 4
d.setdefault((0, 1), 4)
d.setdefault((2, 2), 4)
assert d.items() == Asp.items()
def test_update(d, Asp):
with pytest.raises(NotImplementedError):
Asp.update(Asp)
def test_values(d, Asp):
# Note: dict.values are strange: d={1: 1}; d.values() == d.values() is False
# Using list(d.values()) makes them comparable.
assert list(Asp.values()) == list(d.values())
def test_dunder_getitem(d, Asp):
assert Asp[(0, 1)] == d[(0, 1)]
def test_dunder_setitem(d, Asp):
Asp[(1, 1)] = 5
d[(1, 1)] = 5
assert d.items() == Asp.items()
def test_dunder_delitem(d, Asp):
del Asp[(0, 1)]
del d[(0, 1)]
assert d.items() == Asp.items()
def test_dunder_contains(d, Asp):
assert ((0, 1) in d) == ((0, 1) in Asp)
assert ((0, 0) in d) == ((0, 0) in Asp)
def test_dunder_len(d, Asp):
assert len(d) == len(Asp)
# Note: dunders reversed, or, ror, ior work as dict for dok_matrix, raise for dok_array
def test_dunder_reversed(d, Asp):
if isinstance(Asp, dok_array):
with pytest.raises(TypeError):
list(reversed(Asp))
else:
assert list(reversed(Asp)) == list(reversed(d))
def test_dunder_ior(d, Asp):
if isinstance(Asp, dok_array):
with pytest.raises(TypeError):
Asp |= Asp
else:
dd = {(0, 0): 5}
Asp |= dd
assert Asp[(0, 0)] == 5
d |= dd
assert d.items() == Asp.items()
dd |= Asp
assert dd.items() == Asp.items()
def test_dunder_or(d, Asp):
if isinstance(Asp, dok_array):
with pytest.raises(TypeError):
Asp | Asp
else:
assert d | d == Asp | d
assert d | d == Asp | Asp
def test_dunder_ror(d, Asp):
if isinstance(Asp, dok_array):
with pytest.raises(TypeError):
Asp | Asp
with pytest.raises(TypeError):
d | Asp
else:
assert Asp.__ror__(d) == Asp.__ror__(Asp)
assert d.__ror__(d) == Asp.__ror__(d)
assert d | Asp
# Note: comparison dunders, e.g. ==, >=, etc follow np.array not dict
def test_dunder_eq(A, Asp):
with np.testing.suppress_warnings() as sup:
sup.filter(sp.sparse.SparseEfficiencyWarning)
assert (Asp == Asp).toarray().all()
assert (A == Asp).all()
def test_dunder_ne(A, Asp):
assert not (Asp != Asp).toarray().any()
assert not (A != Asp).any()
def test_dunder_lt(A, Asp):
assert not (Asp < Asp).toarray().any()
assert not (A < Asp).any()
def test_dunder_gt(A, Asp):
assert not (Asp > Asp).toarray().any()
assert not (A > Asp).any()
def test_dunder_le(A, Asp):
with np.testing.suppress_warnings() as sup:
sup.filter(sp.sparse.SparseEfficiencyWarning)
assert (Asp <= Asp).toarray().all()
assert (A <= Asp).all()
def test_dunder_ge(A, Asp):
with np.testing.suppress_warnings() as sup:
sup.filter(sp.sparse.SparseEfficiencyWarning)
assert (Asp >= Asp).toarray().all()
assert (A >= Asp).all()
# Note: iter dunder follows np.array not dict
def test_dunder_iter(A, Asp):
assert all((a == asp).all() for a, asp in zip(A, Asp))

View file

@@ -0,0 +1,51 @@
"""test sparse matrix construction functions"""
from numpy.testing import assert_equal
from scipy.sparse import csr_matrix, csr_array, sparray
import numpy as np
from scipy.sparse import _extract
class TestExtract:
def setup_method(self):
self.cases = [
csr_array([[1,2]]),
csr_array([[1,0]]),
csr_array([[0,0]]),
csr_array([[1],[2]]),
csr_array([[1],[0]]),
csr_array([[0],[0]]),
csr_array([[1,2],[3,4]]),
csr_array([[0,1],[0,0]]),
csr_array([[0,0],[1,0]]),
csr_array([[0,0],[0,0]]),
csr_array([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]),
csr_array([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]).T,
]
def test_find(self):
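        # find() returns coordinates and values that should rebuild the
        # original array exactly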
for A in self.cases:
I,J,V = _extract.find(A)
B = csr_array((V,(I,J)), shape=A.shape)
assert_equal(A.toarray(), B.toarray())
def test_tril(self):
for A in self.cases:
B = A.toarray()
for k in [-3,-2,-1,0,1,2,3]:
assert_equal(_extract.tril(A,k=k).toarray(), np.tril(B,k=k))
def test_triu(self):
for A in self.cases:
B = A.toarray()
for k in [-3,-2,-1,0,1,2,3]:
assert_equal(_extract.triu(A,k=k).toarray(), np.triu(B,k=k))
def test_array_vs_matrix(self):
for A in self.cases:
assert isinstance(_extract.tril(A), sparray)
assert isinstance(_extract.triu(A), sparray)
M = csr_matrix(A)
assert not isinstance(_extract.tril(M), sparray)
assert not isinstance(_extract.triu(M), sparray)

View file

@@ -0,0 +1,603 @@
import contextlib
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from scipy.sparse import csr_array, dok_array, SparseEfficiencyWarning
from .test_arithmetic1d import toarray
formats_for_index1d = [csr_array, dok_array]
@contextlib.contextmanager
def check_remains_sorted(X):
"""Checks that sorted indices property is retained through an operation"""
yield
if not hasattr(X, 'has_sorted_indices') or not X.has_sorted_indices:
return
indices = X.indices.copy()
X.has_sorted_indices = False
X.sort_indices()
assert_equal(indices, X.indices, 'Expected sorted indices, found unsorted')
@pytest.mark.parametrize("spcreator", formats_for_index1d)
class TestGetSet1D:
def test_None_index(self, spcreator):
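        # indexing with None should insert a new axis, matching ndarray
        # behaviour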
D = np.array([4, 3, 0])
A = spcreator(D)
N = D.shape[0]
for j in range(-N, N):
assert_equal(A[j, None].toarray(), D[j, None])
assert_equal(A[None, j].toarray(), D[None, j])
assert_equal(A[None, None, j].toarray(), D[None, None, j])
def test_getitem_shape(self, spcreator):
A = spcreator(np.arange(3 * 4).reshape(3, 4))
assert A[1, 2].ndim == 0
assert A[1, 2:3].shape == (1,)
assert A[None, 1, 2:3].shape == (1, 1)
assert A[None, 1, 2].shape == (1,)
assert A[None, 1, 2, None].shape == (1, 1)
# see gh-22458
assert A[None, 1].shape == (1, 4)
assert A[1, None].shape == (1, 4)
assert A[None, 1, :].shape == (1, 4)
assert A[1, None, :].shape == (1, 4)
assert A[1, :, None].shape == (4, 1)
with pytest.raises(IndexError, match='Only 1D or 2D arrays'):
A[None, 2, 1, None, None]
with pytest.raises(IndexError, match='Only 1D or 2D arrays'):
A[None, 0:2, None, 1]
with pytest.raises(IndexError, match='Only 1D or 2D arrays'):
A[0:1, 1:, None]
with pytest.raises(IndexError, match='Only 1D or 2D arrays'):
A[1:, 1, None, None]
def test_getelement(self, spcreator):
D = np.array([4, 3, 0])
A = spcreator(D)
N = D.shape[0]
for j in range(-N, N):
assert_equal(A[j], D[j])
for ij in [3, -4]:
with pytest.raises(IndexError, match='index (.*) out of (range|bounds)'):
A.__getitem__(ij)
# single element tuples unwrapped
assert A[(0,)] == 4
with pytest.raises(IndexError, match='index (.*) out of (range|bounds)'):
A.__getitem__((4,))
def test_setelement(self, spcreator):
dtype = np.float64
A = spcreator((12,), dtype=dtype)
with np.testing.suppress_warnings() as sup:
sup.filter(
SparseEfficiencyWarning,
"Changing the sparsity structure of .* is expensive",
)
A[0] = dtype(0)
A[1] = dtype(3)
A[8] = dtype(9.0)
A[-2] = dtype(7)
A[5] = 9
A[-9,] = dtype(8)
A[1,] = dtype(5) # overwrite using 1-tuple index
for ij in [13, -14, (13,), (14,)]:
with pytest.raises(IndexError, match='out of (range|bounds)'):
A.__setitem__(ij, 123.0)
@pytest.mark.parametrize("spcreator", formats_for_index1d)
class TestSlicingAndFancy1D:
#######################
# Int-like Array Index
#######################
def test_get_array_index(self, spcreator):
D = np.array([4, 3, 0])
A = spcreator(D)
assert_equal(A[()].toarray(), D[()])
for ij in [(0, 3), (3,)]:
with pytest.raises(IndexError, match='out of (range|bounds)|many indices'):
A.__getitem__(ij)
def test_set_array_index(self, spcreator):
dtype = np.float64
A = spcreator((12,), dtype=dtype)
with np.testing.suppress_warnings() as sup:
sup.filter(
SparseEfficiencyWarning,
"Changing the sparsity structure of .* is expensive",
)
A[np.array(6)] = dtype(4.0) # scalar index
A[np.array(6)] = dtype(2.0) # overwrite with scalar index
assert_equal(A.toarray(), [0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0])
for ij in [(13,), (-14,)]:
with pytest.raises(IndexError, match='index .* out of (range|bounds)'):
A.__setitem__(ij, 123.0)
for v in [(), (0, 3), [1, 2, 3], np.array([1, 2, 3])]:
msg = 'Trying to assign a sequence to an item'
with pytest.raises(ValueError, match=msg):
A.__setitem__(0, v)
####################
# 1d Slice as index
####################
def test_dtype_preservation(self, spcreator):
assert_equal(spcreator((10,), dtype=np.int16)[1:5].dtype, np.int16)
assert_equal(spcreator((6,), dtype=np.int32)[0:0:2].dtype, np.int32)
assert_equal(spcreator((6,), dtype=np.int64)[:].dtype, np.int64)
def test_get_1d_slice(self, spcreator):
B = np.arange(50.)
A = spcreator(B)
assert_equal(B[:], A[:].toarray())
assert_equal(B[2:5], A[2:5].toarray())
C = np.array([4, 0, 6, 0, 0, 0, 0, 0, 1])
D = spcreator(C)
assert_equal(C[1:3], D[1:3].toarray())
        # Now test slicing when the array contains only zeros
E = np.array([0, 0, 0, 0, 0])
F = spcreator(E)
assert_equal(E[1:3], F[1:3].toarray())
assert_equal(E[-2:], F[-2:].toarray())
assert_equal(E[:], F[:].toarray())
assert_equal(E[slice(None)], F[slice(None)].toarray())
def test_slicing_idx_slice(self, spcreator):
B = np.arange(50)
A = spcreator(B)
# [i]
assert_equal(A[2], B[2])
assert_equal(A[-1], B[-1])
assert_equal(A[np.array(-2)], B[-2])
# [1:2]
assert_equal(A[:].toarray(), B[:])
assert_equal(A[5:-2].toarray(), B[5:-2])
assert_equal(A[5:12:3].toarray(), B[5:12:3])
# int8 slice
s = slice(np.int8(2), np.int8(4), None)
assert_equal(A[s].toarray(), B[2:4])
# np.s_
s_ = np.s_
slices = [s_[:2], s_[1:2], s_[3:], s_[3::2],
s_[15:20], s_[3:2],
s_[8:3:-1], s_[4::-2], s_[:5:-1],
0, 1, s_[:], s_[1:5], -1, -2, -5,
np.array(-1), np.int8(-3)]
for j, a in enumerate(slices):
x = A[a]
y = B[a]
if y.shape == ():
assert_equal(x, y, repr(a))
else:
if x.size == 0 and y.size == 0:
pass
else:
assert_equal(x.toarray(), y, repr(a))
def test_ellipsis_1d_slicing(self, spcreator):
B = np.arange(50)
A = spcreator(B)
assert_equal(A[...].toarray(), B[...])
assert_equal(A[...,].toarray(), B[...,])
##########################
# Assignment with Slicing
##########################
def test_slice_scalar_assign(self, spcreator):
A = spcreator((5,))
B = np.zeros((5,))
with np.testing.suppress_warnings() as sup:
sup.filter(
SparseEfficiencyWarning,
"Changing the sparsity structure of .* is expensive",
)
for C in [A, B]:
C[0:1] = 1
C[2:0] = 4
C[2:3] = 9
C[3:] = 1
C[3::-1] = 9
assert_equal(A.toarray(), B)
def test_slice_assign_2(self, spcreator):
shape = (10,)
for idx in [slice(3), slice(None, 10, 4), slice(5, -2)]:
A = spcreator(shape)
with np.testing.suppress_warnings() as sup:
sup.filter(
SparseEfficiencyWarning,
"Changing the sparsity structure of .* is expensive",
)
A[idx] = 1
B = np.zeros(shape)
B[idx] = 1
msg = f"idx={idx!r}"
assert_allclose(A.toarray(), B, err_msg=msg)
def test_self_self_assignment(self, spcreator):
        # Tests whether a slice of one sparse array can be assigned to another.
B = spcreator((5,))
with np.testing.suppress_warnings() as sup:
sup.filter(
SparseEfficiencyWarning,
"Changing the sparsity structure of .* is expensive",
)
B[0] = 2
B[1] = 0
B[2] = 3
B[3] = 10
A = B / 10
B[:] = A[:]
assert_equal(A[:].toarray(), B[:].toarray())
A = B / 10
B[:] = A[:1]
assert_equal(np.zeros((5,)) + A[0], B.toarray())
A = B / 10
B[:-1] = A[1:]
assert_equal(A[1:].toarray(), B[:-1].toarray())
def test_slice_assignment(self, spcreator):
B = spcreator((4,))
expected = np.array([10, 0, 14, 0])
block = [2, 1]
with np.testing.suppress_warnings() as sup:
sup.filter(
SparseEfficiencyWarning,
"Changing the sparsity structure of .* is expensive",
)
B[0] = 5
B[2] = 7
B[:] = B + B
assert_equal(B.toarray(), expected)
B[:2] = csr_array(block)
assert_equal(B.toarray()[:2], block)
def test_set_slice(self, spcreator):
A = spcreator((5,))
B = np.zeros(5, float)
s_ = np.s_
slices = [s_[:2], s_[1:2], s_[3:], s_[3::2],
s_[8:3:-1], s_[4::-2], s_[:5:-1],
0, 1, s_[:], s_[1:5], -1, -2, -5,
np.array(-1), np.int8(-3)]
with np.testing.suppress_warnings() as sup:
sup.filter(
SparseEfficiencyWarning,
"Changing the sparsity structure of .* is expensive",
)
for j, a in enumerate(slices):
A[a] = j
B[a] = j
assert_equal(A.toarray(), B, repr(a))
A[1:10:2] = range(1, 5, 2)
B[1:10:2] = range(1, 5, 2)
assert_equal(A.toarray(), B)
# The next commands should raise exceptions
toobig = list(range(100))
with pytest.raises(ValueError, match='Trying to assign a sequence to an item'):
A.__setitem__(0, toobig)
with pytest.raises(ValueError, match='could not be broadcast together'):
A.__setitem__(slice(None), toobig)
def test_assign_empty(self, spcreator):
A = spcreator(np.ones(3))
B = spcreator((2,))
A[:2] = B
assert_equal(A.toarray(), [0, 0, 1])
####################
# 1d Fancy Indexing
####################
def test_dtype_preservation_empty_index(self, spcreator):
A = spcreator((2,), dtype=np.int16)
assert_equal(A[[False, False]].dtype, np.int16)
assert_equal(A[[]].dtype, np.int16)
def test_bad_index(self, spcreator):
A = spcreator(np.zeros(5))
with pytest.raises(
(IndexError, ValueError, TypeError),
match='Index dimension must be 1 or 2|only integers',
):
A.__getitem__("foo")
with pytest.raises(
(IndexError, ValueError, TypeError),
match='tuple index out of range|only integers',
):
A.__getitem__((2, "foo"))
def test_fancy_indexing_2darray(self, spcreator):
B = np.arange(50).reshape((5, 10))
A = spcreator(B)
# [i]
assert_equal(A[[1, 3]].toarray(), B[[1, 3]])
# [i,[1,2]]
assert_equal(A[3, [1, 3]].toarray(), B[3, [1, 3]])
assert_equal(A[-1, [2, -5]].toarray(), B[-1, [2, -5]])
assert_equal(A[np.array(-1), [2, -5]].toarray(), B[-1, [2, -5]])
assert_equal(A[-1, np.array([2, -5])].toarray(), B[-1, [2, -5]])
assert_equal(A[np.array(-1), np.array([2, -5])].toarray(), B[-1, [2, -5]])
# [1:2,[1,2]]
assert_equal(A[:, [2, 8, 3, -1]].toarray(), B[:, [2, 8, 3, -1]])
assert_equal(A[3:4, [9]].toarray(), B[3:4, [9]])
assert_equal(A[1:4, [-1, -5]].toarray(), B[1:4, [-1, -5]])
assert_equal(A[1:4, np.array([-1, -5])].toarray(), B[1:4, [-1, -5]])
# [[1,2],j]
assert_equal(A[[1, 3], 3].toarray(), B[[1, 3], 3])
assert_equal(A[[2, -5], -4].toarray(), B[[2, -5], -4])
assert_equal(A[np.array([2, -5]), -4].toarray(), B[[2, -5], -4])
assert_equal(A[[2, -5], np.array(-4)].toarray(), B[[2, -5], -4])
assert_equal(A[np.array([2, -5]), np.array(-4)].toarray(), B[[2, -5], -4])
# [[1,2],1:2]
assert_equal(A[[1, 3], :].toarray(), B[[1, 3], :])
assert_equal(A[[2, -5], 8:-1].toarray(), B[[2, -5], 8:-1])
assert_equal(A[np.array([2, -5]), 8:-1].toarray(), B[[2, -5], 8:-1])
# [[1,2],[1,2]]
assert_equal(toarray(A[[1, 3], [2, 4]]), B[[1, 3], [2, 4]])
assert_equal(toarray(A[[-1, -3], [2, -4]]), B[[-1, -3], [2, -4]])
assert_equal(
toarray(A[np.array([-1, -3]), [2, -4]]), B[[-1, -3], [2, -4]]
)
assert_equal(
toarray(A[[-1, -3], np.array([2, -4])]), B[[-1, -3], [2, -4]]
)
assert_equal(
toarray(A[np.array([-1, -3]), np.array([2, -4])]), B[[-1, -3], [2, -4]]
)
# [[[1],[2]],[1,2]]
assert_equal(A[[[1], [3]], [2, 4]].toarray(), B[[[1], [3]], [2, 4]])
assert_equal(
A[[[-1], [-3], [-2]], [2, -4]].toarray(),
B[[[-1], [-3], [-2]], [2, -4]]
)
assert_equal(
A[np.array([[-1], [-3], [-2]]), [2, -4]].toarray(),
B[[[-1], [-3], [-2]], [2, -4]]
)
assert_equal(
A[[[-1], [-3], [-2]], np.array([2, -4])].toarray(),
B[[[-1], [-3], [-2]], [2, -4]]
)
assert_equal(
A[np.array([[-1], [-3], [-2]]), np.array([2, -4])].toarray(),
B[[[-1], [-3], [-2]], [2, -4]]
)
# [[1,2]]
assert_equal(A[[1, 3]].toarray(), B[[1, 3]])
assert_equal(A[[-1, -3]].toarray(), B[[-1, -3]])
assert_equal(A[np.array([-1, -3])].toarray(), B[[-1, -3]])
# [[1,2],:][:,[1,2]]
assert_equal(
A[[1, 3], :][:, [2, 4]].toarray(), B[[1, 3], :][:, [2, 4]]
)
assert_equal(
A[[-1, -3], :][:, [2, -4]].toarray(), B[[-1, -3], :][:, [2, -4]]
)
assert_equal(
A[np.array([-1, -3]), :][:, np.array([2, -4])].toarray(),
B[[-1, -3], :][:, [2, -4]]
)
# [:,[1,2]][[1,2],:]
assert_equal(
A[:, [1, 3]][[2, 4], :].toarray(), B[:, [1, 3]][[2, 4], :]
)
assert_equal(
A[:, [-1, -3]][[2, -4], :].toarray(), B[:, [-1, -3]][[2, -4], :]
)
assert_equal(
A[:, np.array([-1, -3])][np.array([2, -4]), :].toarray(),
B[:, [-1, -3]][[2, -4], :]
)
def test_fancy_indexing(self, spcreator):
B = np.arange(50)
A = spcreator(B)
# [i]
assert_equal(A[[3]].toarray(), B[[3]])
# [np.array]
assert_equal(A[[1, 3]].toarray(), B[[1, 3]])
assert_equal(A[[2, -5]].toarray(), B[[2, -5]])
assert_equal(A[np.array(-1)], B[-1])
assert_equal(A[np.array([-1, 2])].toarray(), B[[-1, 2]])
assert_equal(A[np.array(5)], B[np.array(5)])
# [[[1],[2]]]
ind = np.array([[1], [3]])
assert_equal(A[ind].toarray(), B[ind])
ind = np.array([[-1], [-3], [-2]])
assert_equal(A[ind].toarray(), B[ind])
# [[1, 2]]
assert_equal(A[[1, 3]].toarray(), B[[1, 3]])
assert_equal(A[[-1, -3]].toarray(), B[[-1, -3]])
assert_equal(A[np.array([-1, -3])].toarray(), B[[-1, -3]])
# [[1, 2]][[1, 2]]
assert_equal(A[[1, 5, 2, 8]][[1, 3]].toarray(),
B[[1, 5, 2, 8]][[1, 3]])
assert_equal(A[[-1, -5, 2, 8]][[1, -4]].toarray(),
B[[-1, -5, 2, 8]][[1, -4]])
def test_fancy_indexing_boolean(self, spcreator):
np.random.seed(1234) # make runs repeatable
B = np.arange(50)
A = spcreator(B)
I = np.array(np.random.randint(0, 2, size=50), dtype=bool)
assert_equal(toarray(A[I]), B[I])
assert_equal(toarray(A[B > 9]), B[B > 9])
Z1 = np.zeros(51, dtype=bool)
Z2 = np.zeros(51, dtype=bool)
Z2[-1] = True
Z3 = np.zeros(51, dtype=bool)
Z3[0] = True
msg = 'bool index .* has shape|boolean index did not match'
with pytest.raises(IndexError, match=msg):
A.__getitem__(Z1)
with pytest.raises(IndexError, match=msg):
A.__getitem__(Z2)
with pytest.raises(IndexError, match=msg):
A.__getitem__(Z3)
def test_fancy_indexing_sparse_boolean(self, spcreator):
np.random.seed(1234) # make runs repeatable
B = np.arange(20)
A = spcreator(B)
X = np.array(np.random.randint(0, 2, size=20), dtype=bool)
Xsp = csr_array(X)
assert_equal(toarray(A[Xsp]), B[X])
assert_equal(toarray(A[A > 9]), B[B > 9])
Y = np.array(np.random.randint(0, 2, size=60), dtype=bool)
Ysp = csr_array(Y)
with pytest.raises(IndexError, match='bool index .* has shape|only integers'):
A.__getitem__(Ysp)
with pytest.raises(IndexError, match='tuple index out of range|only integers'):
A.__getitem__((Xsp, 1))
def test_fancy_indexing_seq_assign(self, spcreator):
mat = spcreator(np.array([1, 0]))
with pytest.raises(ValueError, match='Trying to assign a sequence to an item'):
mat.__setitem__(0, np.array([1, 2]))
def test_fancy_indexing_empty(self, spcreator):
B = np.arange(50)
B[3:9] = 0
B[30] = 0
A = spcreator(B)
K = np.array([False] * 50)
assert_equal(toarray(A[K]), B[K])
K = np.array([], dtype=int)
assert_equal(toarray(A[K]), B[K])
J = np.array([0, 1, 2, 3, 4], dtype=int)
assert_equal(toarray(A[J]), B[J])
############################
# 1d Fancy Index Assignment
############################
def test_bad_index_assign(self, spcreator):
A = spcreator(np.zeros(5))
msg = 'Index dimension must be 1 or 2|only integers'
with pytest.raises((IndexError, ValueError, TypeError), match=msg):
A.__setitem__("foo", 2)
def test_fancy_indexing_set(self, spcreator):
M = (5,)
# [1:2]
for j in [[2, 3, 4], slice(None, 10, 4), np.arange(3),
slice(5, -2), slice(2, 5)]:
A = spcreator(M)
B = np.zeros(M)
with np.testing.suppress_warnings() as sup:
sup.filter(
SparseEfficiencyWarning,
"Changing the sparsity structure of .* is expensive",
)
B[j] = 1
with check_remains_sorted(A):
A[j] = 1
assert_allclose(A.toarray(), B)
def test_sequence_assignment(self, spcreator):
A = spcreator((4,))
B = spcreator((3,))
i0 = [0, 1, 2]
i1 = (0, 1, 2)
i2 = np.array(i0)
with np.testing.suppress_warnings() as sup:
sup.filter(
SparseEfficiencyWarning,
"Changing the sparsity structure of .* is expensive",
)
with check_remains_sorted(A):
A[i0] = B[i0]
msg = "too many indices for array|tuple index out of range"
with pytest.raises(IndexError, match=msg):
B.__getitem__(i1)
A[i2] = B[i2]
assert_equal(A[:3].toarray(), B.toarray())
assert A.shape == (4,)
# slice
A = spcreator((4,))
with check_remains_sorted(A):
A[1:3] = [10, 20]
assert_equal(A.toarray(), [0, 10, 20, 0])
# array
A = spcreator((4,))
B = np.zeros(4)
with check_remains_sorted(A):
for C in [A, B]:
C[[0, 1, 2]] = [4, 5, 6]
assert_equal(A.toarray(), B)
def test_fancy_assign_empty(self, spcreator):
B = np.arange(50)
B[2] = 0
B[[3, 6]] = 0
A = spcreator(B)
K = np.array([False] * 50)
A[K] = 42
assert_equal(A.toarray(), B)
K = np.array([], dtype=int)
A[K] = 42
assert_equal(A.toarray(), B)

View file

@@ -0,0 +1,109 @@
import os
import numpy as np
import tempfile
from pytest import raises as assert_raises
from numpy.testing import assert_equal, assert_
from scipy.sparse import (sparray, csc_matrix, csr_matrix, bsr_matrix, dia_matrix,
coo_matrix, dok_matrix, csr_array, save_npz, load_npz)
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
def _save_and_load(matrix):
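    # round-trip helper: write to a temporary .npz file, then load it back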
fd, tmpfile = tempfile.mkstemp(suffix='.npz')
os.close(fd)
try:
save_npz(tmpfile, matrix)
loaded_matrix = load_npz(tmpfile)
finally:
os.remove(tmpfile)
return loaded_matrix
def _check_save_and_load(dense_matrix):
for matrix_class in [csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix]:
matrix = matrix_class(dense_matrix)
loaded_matrix = _save_and_load(matrix)
assert_(type(loaded_matrix) is matrix_class)
assert_(loaded_matrix.shape == dense_matrix.shape)
assert_(loaded_matrix.dtype == dense_matrix.dtype)
assert_equal(loaded_matrix.toarray(), dense_matrix)
def test_save_and_load_random():
N = 10
np.random.seed(0)
dense_matrix = np.random.random((N, N))
dense_matrix[dense_matrix > 0.7] = 0
_check_save_and_load(dense_matrix)
def test_save_and_load_empty():
dense_matrix = np.zeros((4,6))
_check_save_and_load(dense_matrix)
def test_save_and_load_one_entry():
dense_matrix = np.zeros((4,6))
dense_matrix[1,2] = 1
_check_save_and_load(dense_matrix)
def test_sparray_vs_spmatrix():
    # save/load matrix
fd, tmpfile = tempfile.mkstemp(suffix='.npz')
os.close(fd)
try:
save_npz(tmpfile, csr_matrix([[1.2, 0, 0.9], [0, 0.3, 0]]))
loaded_matrix = load_npz(tmpfile)
finally:
os.remove(tmpfile)
    # save/load array
fd, tmpfile = tempfile.mkstemp(suffix='.npz')
os.close(fd)
try:
save_npz(tmpfile, csr_array([[1.2, 0, 0.9], [0, 0.3, 0]]))
loaded_array = load_npz(tmpfile)
finally:
os.remove(tmpfile)
assert not isinstance(loaded_matrix, sparray)
assert isinstance(loaded_array, sparray)
assert_(loaded_matrix.dtype == loaded_array.dtype)
assert_equal(loaded_matrix.toarray(), loaded_array.toarray())
def test_malicious_load():
class Executor:
def __reduce__(self):
return (assert_, (False, 'unexpected code execution'))
fd, tmpfile = tempfile.mkstemp(suffix='.npz')
os.close(fd)
try:
np.savez(tmpfile, format=Executor())
# Should raise a ValueError, not execute code
assert_raises(ValueError, load_npz, tmpfile)
finally:
os.remove(tmpfile)
def test_py23_compatibility():
# Try loading files saved on Python 2 and Python 3. They are not
# the same, since files saved with SciPy versions < 1.0.0 may
# contain unicode.
a = load_npz(os.path.join(DATA_DIR, 'csc_py2.npz'))
b = load_npz(os.path.join(DATA_DIR, 'csc_py3.npz'))
c = csc_matrix([[0]])
assert_equal(a.toarray(), c.toarray())
assert_equal(b.toarray(), c.toarray())
def test_implemented_error():
    # Attempts to save an unsupported type and checks that a
    # NotImplementedError is raised.
x = dok_matrix((2,3))
x[0,1] = 1
assert_raises(NotImplementedError, save_npz, 'x.npz', x)

View file

@@ -0,0 +1,128 @@
"""Test of min-max 1D features of sparse array classes"""
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from scipy.sparse import coo_array, csr_array, csc_array, bsr_array
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix, bsr_matrix
from scipy.sparse._sputils import isscalarlike
def toarray(a):
if isinstance(a, np.ndarray) or isscalarlike(a):
return a
return a.toarray()
formats_for_minmax = [bsr_array, coo_array, csc_array, csr_array]
formats_for_minmax_supporting_1d = [coo_array, csr_array]
@pytest.mark.parametrize("spcreator", formats_for_minmax_supporting_1d)
class Test_MinMaxMixin1D:
def test_minmax(self, spcreator):
D = np.arange(5)
X = spcreator(D)
assert_equal(X.min(), 0)
assert_equal(X.max(), 4)
assert_equal((-X).min(), -4)
assert_equal((-X).max(), 0)
def test_minmax_axis(self, spcreator):
D = np.arange(50)
X = spcreator(D)
for axis in [0, -1]:
assert_array_equal(
toarray(X.max(axis=axis)), D.max(axis=axis, keepdims=True)
)
assert_array_equal(
toarray(X.min(axis=axis)), D.min(axis=axis, keepdims=True)
)
for axis in [-2, 1]:
with pytest.raises(ValueError, match="axis out of range"):
X.min(axis=axis)
with pytest.raises(ValueError, match="axis out of range"):
X.max(axis=axis)
def test_numpy_minmax(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
assert_array_equal(np.min(datsp), np.min(dat))
assert_array_equal(np.max(datsp), np.max(dat))
def test_argmax(self, spcreator):
D1 = np.array([-1, 5, 2, 3])
D2 = np.array([0, 0, -1, -2])
D3 = np.array([-1, -2, -3, -4])
D4 = np.array([1, 2, 3, 4])
D5 = np.array([1, 2, 0, 0])
for D in [D1, D2, D3, D4, D5]:
mat = spcreator(D)
assert_equal(mat.argmax(), np.argmax(D))
assert_equal(mat.argmin(), np.argmin(D))
assert_equal(mat.argmax(axis=0), np.argmax(D, axis=0))
assert_equal(mat.argmin(axis=0), np.argmin(D, axis=0))
D6 = np.empty((0,))
for axis in [None, 0]:
mat = spcreator(D6)
with pytest.raises(ValueError, match="to an empty matrix"):
mat.argmin(axis=axis)
with pytest.raises(ValueError, match="to an empty matrix"):
mat.argmax(axis=axis)
@pytest.mark.parametrize("spcreator", formats_for_minmax)
class Test_ShapeMinMax2DWithAxis:
def test_minmax(self, spcreator):
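        # sparse arrays drop the reduced axis; the spmatrix checks below keep
        # it (keepdims-style 2D results)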
dat = np.array([[-1, 5, 0, 3], [0, 0, -1, -2], [0, 0, 1, 2]])
datsp = spcreator(dat)
for (spminmax, npminmax) in [
(datsp.min, np.min),
(datsp.max, np.max),
(datsp.nanmin, np.nanmin),
(datsp.nanmax, np.nanmax),
]:
for ax, result_shape in [(0, (4,)), (1, (3,))]:
assert_equal(toarray(spminmax(axis=ax)), npminmax(dat, axis=ax))
assert_equal(spminmax(axis=ax).shape, result_shape)
assert spminmax(axis=ax).format == "coo"
for spminmax in [datsp.argmin, datsp.argmax]:
for ax in [0, 1]:
assert isinstance(spminmax(axis=ax), np.ndarray)
# verify spmatrix behavior
spmat_form = {
'coo': coo_matrix,
'csr': csr_matrix,
'csc': csc_matrix,
'bsr': bsr_matrix,
}
datspm = spmat_form[datsp.format](dat)
for spm, npm in [
(datspm.min, np.min),
(datspm.max, np.max),
(datspm.nanmin, np.nanmin),
(datspm.nanmax, np.nanmax),
]:
for ax, result_shape in [(0, (1, 4)), (1, (3, 1))]:
assert_equal(toarray(spm(axis=ax)), npm(dat, axis=ax, keepdims=True))
assert_equal(spm(axis=ax).shape, result_shape)
assert spm(axis=ax).format == "coo"
for spminmax in [datspm.argmin, datspm.argmax]:
for ax in [0, 1]:
assert isinstance(spminmax(axis=ax), np.ndarray)

View file

@@ -0,0 +1,344 @@
import sys
import os
import gc
import threading
import numpy as np
from numpy.testing import assert_equal, assert_, assert_allclose
from scipy.sparse import (_sparsetools, coo_matrix, csr_matrix, csc_matrix,
bsr_matrix, dia_matrix)
from scipy.sparse._sputils import supported_dtypes
from scipy._lib._testutils import check_free_memory
import pytest
from pytest import raises as assert_raises
def int_to_int8(n):
"""
Wrap an integer to the interval [-128, 127].
"""
return (n + 128) % 256 - 128
@pytest.mark.thread_unsafe # Exception handling in CPython 3.13 has races
def test_exception():
assert_raises(MemoryError, _sparsetools.test_throw_error)
def test_threads():
# Smoke test for parallel threaded execution; doesn't actually
# check that code runs in parallel, but just that it produces
# expected results.
nthreads = 10
niter = 100
n = 20
a = csr_matrix(np.ones([n, n]))
bres = []
class Worker(threading.Thread):
def run(self):
b = a.copy()
for j in range(niter):
_sparsetools.csr_plus_csr(n, n,
a.indptr, a.indices, a.data,
a.indptr, a.indices, a.data,
b.indptr, b.indices, b.data)
bres.append(b)
threads = [Worker() for _ in range(nthreads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for b in bres:
assert_(np.all(b.toarray() == 2))
def test_regression_std_vector_dtypes():
# Regression test for gh-3780, checking the std::vector typemaps
# in sparsetools.cxx are complete.
for dtype in supported_dtypes:
ad = np.array([[1, 2], [3, 4]]).astype(dtype)
a = csr_matrix(ad, dtype=dtype)
# getcol is one function using std::vector typemaps, and should not fail
assert_equal(a.getcol(0).toarray(), ad[:, :1])
@pytest.mark.slow
@pytest.mark.thread_unsafe
@pytest.mark.xfail_on_32bit("Can't create large array for test")
def test_nnz_overflow():
# Regression test for gh-7230 / gh-7871, checking that coo_toarray
# with nnz > int32max doesn't overflow.
nnz = np.iinfo(np.int32).max + 1
# Ensure ~20 GB of RAM is free to run this test.
check_free_memory((4 + 4 + 1) * nnz / 1e6 + 0.5)
# Use nnz duplicate entries to keep the dense version small.
row = np.zeros(nnz, dtype=np.int32)
col = np.zeros(nnz, dtype=np.int32)
data = np.zeros(nnz, dtype=np.int8)
data[-1] = 4
s = coo_matrix((data, (row, col)), shape=(1, 1), copy=False)
# Sums nnz duplicates to produce a 1x1 array containing 4.
d = s.toarray()
assert_allclose(d, [[4]])
@pytest.mark.thread_unsafe
@pytest.mark.skipif(
not (sys.platform.startswith('linux') and np.dtype(np.intp).itemsize >= 8),
reason="test requires 64-bit Linux"
)
class TestInt32Overflow:
"""
Some of the sparsetools routines use dense 2D matrices whose
total size is not bounded by the nnz of the sparse matrix. These
routines used to suffer from int32 wraparounds; here, we try to
check that the wraparounds don't occur any more.
"""
# choose n large enough
n = 50000
def setup_method(self):
assert self.n**2 > np.iinfo(np.int32).max
# check there's enough memory even if everything is run at the
# same time
try:
parallel_count = int(os.environ.get('PYTEST_XDIST_WORKER_COUNT', '1'))
except ValueError:
parallel_count = np.inf
check_free_memory(3000 * parallel_count)
def teardown_method(self):
gc.collect()
@pytest.mark.fail_slow(2) # keep in fast set, only non-slow test
def test_coo_todense(self):
# Check *_todense routines (cf. gh-2179)
#
# All of them in the end call coo_matrix.todense
n = self.n
i = np.array([0, n-1])
j = np.array([0, n-1])
data = np.array([1, 2], dtype=np.int8)
m = coo_matrix((data, (i, j)))
r = m.todense()
assert_equal(r[0,0], 1)
assert_equal(r[-1,-1], 2)
del r
gc.collect()
@pytest.mark.slow
def test_matvecs(self):
# Check *_matvecs routines
n = self.n
i = np.array([0, n-1])
j = np.array([0, n-1])
data = np.array([1, 2], dtype=np.int8)
m = coo_matrix((data, (i, j)))
b = np.ones((n, n), dtype=np.int8)
for sptype in (csr_matrix, csc_matrix, bsr_matrix):
m2 = sptype(m)
r = m2.dot(b)
assert_equal(r[0,0], 1)
assert_equal(r[-1,-1], 2)
del r
gc.collect()
del b
gc.collect()
@pytest.mark.slow
def test_dia_matvec(self):
# Check: huge dia_matrix _matvec
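        # row 0 touches all n diagonals, so the int8 result accumulates n ones
        # and wraps around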
n = self.n
data = np.ones((n, n), dtype=np.int8)
offsets = np.arange(n)
m = dia_matrix((data, offsets), shape=(n, n))
v = np.ones(m.shape[1], dtype=np.int8)
r = m.dot(v)
assert_equal(r[0], int_to_int8(n))
del data, offsets, m, v, r
gc.collect()
_bsr_ops = [pytest.param("matmat", marks=pytest.mark.xslow),
pytest.param("matvecs", marks=pytest.mark.xslow),
"matvec",
"diagonal",
"sort_indices",
pytest.param("transpose", marks=pytest.mark.xslow)]
@pytest.mark.slow
@pytest.mark.parametrize("op", _bsr_ops)
def test_bsr_1_block(self, op):
# Check: huge bsr_matrix (1-block)
#
# The point here is that indices inside a block may overflow.
def get_matrix():
n = self.n
data = np.ones((1, n, n), dtype=np.int8)
indptr = np.array([0, 1], dtype=np.int32)
indices = np.array([0], dtype=np.int32)
m = bsr_matrix((data, indices, indptr), blocksize=(n, n), copy=False)
del data, indptr, indices
return m
gc.collect()
try:
getattr(self, "_check_bsr_" + op)(get_matrix)
finally:
gc.collect()
@pytest.mark.slow
@pytest.mark.parametrize("op", _bsr_ops)
def test_bsr_n_block(self, op):
# Check: huge bsr_matrix (n-block)
#
# The point here is that while indices within a block don't
        # overflow, accumulators across many blocks may.
def get_matrix():
n = self.n
data = np.ones((n, n, 1), dtype=np.int8)
indptr = np.array([0, n], dtype=np.int32)
indices = np.arange(n, dtype=np.int32)
m = bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False)
del data, indptr, indices
return m
gc.collect()
try:
getattr(self, "_check_bsr_" + op)(get_matrix)
finally:
gc.collect()
def _check_bsr_matvecs(self, m): # skip name check
m = m()
n = self.n
# _matvecs
r = m.dot(np.ones((n, 2), dtype=np.int8))
assert_equal(r[0, 0], int_to_int8(n))
def _check_bsr_matvec(self, m): # skip name check
m = m()
n = self.n
# _matvec
r = m.dot(np.ones((n,), dtype=np.int8))
assert_equal(r[0], int_to_int8(n))
def _check_bsr_diagonal(self, m): # skip name check
m = m()
n = self.n
# _diagonal
r = m.diagonal()
assert_equal(r, np.ones(n))
def _check_bsr_sort_indices(self, m): # skip name check
# _sort_indices
m = m()
m.sort_indices()
def _check_bsr_transpose(self, m): # skip name check
# _transpose
m = m()
m.transpose()
def _check_bsr_matmat(self, m): # skip name check
m = m()
n = self.n
# _bsr_matmat
m2 = bsr_matrix(np.ones((n, 2), dtype=np.int8), blocksize=(m.blocksize[1], 2))
m.dot(m2) # shouldn't SIGSEGV
del m2
# _bsr_matmat
m2 = bsr_matrix(np.ones((2, n), dtype=np.int8), blocksize=(2, m.blocksize[0]))
m2.dot(m) # shouldn't SIGSEGV
@pytest.mark.thread_unsafe
@pytest.mark.skip(reason="64-bit indices in sparse matrices not available")
def test_csr_matmat_int64_overflow():
n = 3037000500
assert n**2 > np.iinfo(np.int64).max
# the test would take crazy amounts of memory
check_free_memory(n * (8*2 + 1) * 3 / 1e6)
# int64 overflow
data = np.ones((n,), dtype=np.int8)
indptr = np.arange(n+1, dtype=np.int64)
indices = np.zeros(n, dtype=np.int64)
a = csr_matrix((data, indices, indptr))
b = a.T
assert_raises(RuntimeError, a.dot, b)
def test_upcast():
a0 = csr_matrix([[np.pi, np.pi*1j], [3, 4]], dtype=complex)
b0 = np.array([256+1j, 2**32], dtype=complex)
for a_dtype in supported_dtypes:
for b_dtype in supported_dtypes:
msg = f"({a_dtype!r}, {b_dtype!r})"
if np.issubdtype(a_dtype, np.complexfloating):
a = a0.copy().astype(a_dtype)
else:
a = a0.real.copy().astype(a_dtype)
if np.issubdtype(b_dtype, np.complexfloating):
b = b0.copy().astype(b_dtype)
else:
with np.errstate(invalid="ignore"):
# Casting a large value (2**32) to int8 causes a warning in
# numpy >1.23
b = b0.real.copy().astype(b_dtype)
if not (a_dtype == np.bool_ and b_dtype == np.bool_):
c = np.zeros((2,), dtype=np.bool_)
assert_raises(ValueError, _sparsetools.csr_matvec,
2, 2, a.indptr, a.indices, a.data, b, c)
if ((np.issubdtype(a_dtype, np.complexfloating) and
not np.issubdtype(b_dtype, np.complexfloating)) or
(not np.issubdtype(a_dtype, np.complexfloating) and
np.issubdtype(b_dtype, np.complexfloating))):
c = np.zeros((2,), dtype=np.float64)
assert_raises(ValueError, _sparsetools.csr_matvec,
2, 2, a.indptr, a.indices, a.data, b, c)
c = np.zeros((2,), dtype=np.result_type(a_dtype, b_dtype))
_sparsetools.csr_matvec(2, 2, a.indptr, a.indices, a.data, b, c)
assert_allclose(c, np.dot(a.toarray(), b), err_msg=msg)
def test_endianness():
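    # little- and big-endian data should give identical matvec results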
d = np.ones((3,4))
offsets = [-1,0,1]
a = dia_matrix((d.astype('<f8'), offsets), (4, 4))
b = dia_matrix((d.astype('>f8'), offsets), (4, 4))
v = np.arange(4)
assert_allclose(a.dot(v), [1, 3, 6, 5])
assert_allclose(b.dot(v), [1, 3, 6, 5])

View file

@@ -0,0 +1,97 @@
from numpy import array, kron, diag
from numpy.testing import assert_, assert_equal
from scipy.sparse import _spfuncs as spfuncs
from scipy.sparse import csr_matrix, csc_matrix, bsr_matrix
from scipy.sparse._sparsetools import (csr_scale_rows, csr_scale_columns,
bsr_scale_rows, bsr_scale_columns)
class TestSparseFunctions:
def test_scale_rows_and_cols(self):
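        # scaling rows by v is equivalent to diag(v) @ D; scaling columns is
        # equivalent to D @ diag(v)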
D = array([[1, 0, 0, 2, 3],
[0, 4, 0, 5, 0],
[0, 0, 6, 7, 0]])
#TODO expose through function
S = csr_matrix(D)
v = array([1,2,3])
csr_scale_rows(3,5,S.indptr,S.indices,S.data,v)
assert_equal(S.toarray(), diag(v)@D)
S = csr_matrix(D)
v = array([1,2,3,4,5])
csr_scale_columns(3,5,S.indptr,S.indices,S.data,v)
assert_equal(S.toarray(), D@diag(v))
# blocks
E = kron(D,[[1,2],[3,4]])
S = bsr_matrix(E,blocksize=(2,2))
v = array([1,2,3,4,5,6])
bsr_scale_rows(3,5,2,2,S.indptr,S.indices,S.data,v)
assert_equal(S.toarray(), diag(v)@E)
S = bsr_matrix(E,blocksize=(2,2))
v = array([1,2,3,4,5,6,7,8,9,10])
bsr_scale_columns(3,5,2,2,S.indptr,S.indices,S.data,v)
assert_equal(S.toarray(), E@diag(v))
E = kron(D,[[1,2,3],[4,5,6]])
S = bsr_matrix(E,blocksize=(2,3))
v = array([1,2,3,4,5,6])
bsr_scale_rows(3,5,2,3,S.indptr,S.indices,S.data,v)
assert_equal(S.toarray(), diag(v)@E)
S = bsr_matrix(E,blocksize=(2,3))
v = array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
bsr_scale_columns(3,5,2,3,S.indptr,S.indices,S.data,v)
assert_equal(S.toarray(), E@diag(v))
def test_estimate_blocksize(self):
mats = []
mats.append([[0,1],[1,0]])
mats.append([[1,1,0],[0,0,1],[1,0,1]])
mats.append([[0],[0],[1]])
mats = [array(x) for x in mats]
blks = []
blks.append([[1]])
blks.append([[1,1],[1,1]])
blks.append([[1,1],[0,1]])
blks.append([[1,1,0],[1,0,1],[1,1,1]])
blks = [array(x) for x in blks]
for A in mats:
for B in blks:
X = kron(A,B)
r,c = spfuncs.estimate_blocksize(X)
assert_(r >= B.shape[0])
assert_(c >= B.shape[1])
def test_count_blocks(self):
def gold(A,bs):
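            # reference count: distinct (row-block, column-block) pairs that
            # contain a nonzero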
R,C = bs
I,J = A.nonzero()
return len(set(zip(I//R,J//C)))
mats = []
mats.append([[0]])
mats.append([[1]])
mats.append([[1,0]])
mats.append([[1,1]])
mats.append([[0,1],[1,0]])
mats.append([[1,1,0],[0,0,1],[1,0,1]])
mats.append([[0],[0],[1]])
for A in mats:
for B in mats:
X = kron(A,B)
Y = csr_matrix(X)
for R in range(1,6):
for C in range(1,6):
assert_equal(spfuncs.count_blocks(Y, (R, C)), gold(X, (R, C)))
X = kron([[1,1,0],[0,0,1],[1,0,1]],[[1,1]])
Y = csc_matrix(X)
assert_equal(spfuncs.count_blocks(X, (1, 2)), gold(X, (1, 2)))
assert_equal(spfuncs.count_blocks(Y, (1, 2)), gold(X, (1, 2)))

View file

@@ -0,0 +1,424 @@
"""unit tests for sparse utility functions"""
import numpy as np
from numpy.testing import assert_equal
import pytest
from pytest import raises as assert_raises
from scipy.sparse import _sputils as sputils, csr_array, bsr_array, dia_array, coo_array
from scipy.sparse._sputils import matrix


class TestSparseUtils:
    def test_upcast(self):
        assert_equal(sputils.upcast('intc'), np.intc)
        assert_equal(sputils.upcast('int32', 'float32'), np.float64)
        assert_equal(sputils.upcast('bool', complex, float), np.complex128)
        assert_equal(sputils.upcast('i', 'd'), np.float64)

    def test_getdtype(self):
        A = np.array([1], dtype='int8')

        assert_equal(sputils.getdtype(None, default=float), float)
        assert_equal(sputils.getdtype(None, a=A), np.int8)

        with assert_raises(
            ValueError,
            match="scipy.sparse does not support dtype object. .*",
        ):
            sputils.getdtype("O")

        with assert_raises(
            ValueError,
            match="scipy.sparse does not support dtype float16. .*",
        ):
            sputils.getdtype(None, default=np.float16)

    def test_isscalarlike(self):
        assert_equal(sputils.isscalarlike(3.0), True)
        assert_equal(sputils.isscalarlike(-4), True)
        assert_equal(sputils.isscalarlike(2.5), True)
        assert_equal(sputils.isscalarlike(1 + 3j), True)
        assert_equal(sputils.isscalarlike(np.array(3)), True)
        assert_equal(sputils.isscalarlike("16"), True)

        assert_equal(sputils.isscalarlike(np.array([3])), False)
        assert_equal(sputils.isscalarlike([[3]]), False)
        assert_equal(sputils.isscalarlike((1,)), False)
        assert_equal(sputils.isscalarlike((1, 2)), False)

    def test_isintlike(self):
        assert_equal(sputils.isintlike(-4), True)
        assert_equal(sputils.isintlike(np.array(3)), True)
        assert_equal(sputils.isintlike(np.array([3])), False)

        with assert_raises(
            ValueError,
            match="Inexact indices into sparse matrices are not allowed"
        ):
            sputils.isintlike(3.0)

        assert_equal(sputils.isintlike(2.5), False)
        assert_equal(sputils.isintlike(1 + 3j), False)
        assert_equal(sputils.isintlike((1,)), False)
        assert_equal(sputils.isintlike((1, 2)), False)

    def test_isshape(self):
        assert_equal(sputils.isshape((1, 2)), True)
        assert_equal(sputils.isshape((5, 2)), True)

        assert_equal(sputils.isshape((1.5, 2)), False)
        assert_equal(sputils.isshape((2, 2, 2)), False)
        assert_equal(sputils.isshape(([2], 2)), False)
        assert_equal(sputils.isshape((-1, 2), nonneg=False), True)
        assert_equal(sputils.isshape((2, -1), nonneg=False), True)
        assert_equal(sputils.isshape((-1, 2), nonneg=True), False)
        assert_equal(sputils.isshape((2, -1), nonneg=True), False)

        assert_equal(sputils.isshape((1.5, 2), allow_nd=(1, 2)), False)
        assert_equal(sputils.isshape(([2], 2), allow_nd=(1, 2)), False)
        assert_equal(sputils.isshape((2, 2, -2), nonneg=True, allow_nd=(1, 2)),
                     False)
        assert_equal(sputils.isshape((2,), allow_nd=(1, 2)), True)
        assert_equal(sputils.isshape((2, 2,), allow_nd=(1, 2)), True)
        assert_equal(sputils.isshape((2, 2, 2), allow_nd=(1, 2)), False)

    def test_issequence(self):
        assert_equal(sputils.issequence((1,)), True)
        assert_equal(sputils.issequence((1, 2, 3)), True)
        assert_equal(sputils.issequence([1]), True)
        assert_equal(sputils.issequence([1, 2, 3]), True)
        assert_equal(sputils.issequence(np.array([1, 2, 3])), True)

        assert_equal(sputils.issequence(np.array([[1], [2], [3]])), False)
        assert_equal(sputils.issequence(3), False)

    def test_ismatrix(self):
        assert_equal(sputils.ismatrix(((),)), True)
        assert_equal(sputils.ismatrix([[1], [2]]), True)
        assert_equal(sputils.ismatrix(np.arange(3)[None]), True)

        assert_equal(sputils.ismatrix([1, 2]), False)
        assert_equal(sputils.ismatrix(np.arange(3)), False)
        assert_equal(sputils.ismatrix([[[1]]]), False)
        assert_equal(sputils.ismatrix(3), False)

    def test_isdense(self):
        assert_equal(sputils.isdense(np.array([1])), True)
        assert_equal(sputils.isdense(matrix([1])), True)

    def test_validateaxis(self):
        with assert_raises(ValueError, match="does not accept 0D axis"):
            sputils.validateaxis(())
        for ax in [1.5, (0, 1.5), (1.5, 0)]:
            with assert_raises(TypeError, match="must be an integer"):
                sputils.validateaxis(ax)
        for ax in [(1, 1), (1, -1), (0, -2)]:
            with assert_raises(ValueError, match="duplicate value in axis"):
                sputils.validateaxis(ax)

        # ndim 1
        for ax in [1, -2, (0, 1), (1, -1)]:
            with assert_raises(ValueError, match="out of range"):
                sputils.validateaxis(ax, ndim=1)
        with assert_raises(ValueError, match="duplicate value in axis"):
            sputils.validateaxis((0, -1), ndim=1)
        # all valid axis values lead to None when canonical
        for axis in (0, -1, None, (0,), (-1,)):
            assert sputils.validateaxis(axis, ndim=1) is None

        # ndim 2
        for ax in [5, -5, (0, 5), (-5, 0)]:
            with assert_raises(ValueError, match="out of range"):
                sputils.validateaxis(ax, ndim=2)
        for axis in ((0,), (1,), None):
            assert sputils.validateaxis(axis, ndim=2) == axis
        axis_2d = {-2: (0,), -1: (1,), 0: (0,), 1: (1,), (0, 1): None, (0, -1): None}
        for axis, canonical_axis in axis_2d.items():
            assert sputils.validateaxis(axis, ndim=2) == canonical_axis

        # ndim 4
        for axis in ((2,), (3,), (2, 3), (2, 1), (0, 3)):
            assert sputils.validateaxis(axis, ndim=4) == axis
        axis_4d = {-4: (0,), -3: (1,), 2: (2,), 3: (3,), (3, -4): (3, 0)}
        for axis, canonical_axis in axis_4d.items():
            assert sputils.validateaxis(axis, ndim=4) == canonical_axis
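        # Canonical form, as exercised above: axes are wrapped to be
        # non-negative, returned as tuples, and an axis tuple covering every
        # dimension collapses to None.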
@pytest.mark.parametrize("container", [csr_array, bsr_array])
def test_safely_cast_index_compressed(self, container):
# This is slow to test completely as nnz > imax is big
# and indptr is big for some shapes
# So we don't test large nnz, nor csc_array (same code as csr_array)
imax = np.int64(np.iinfo(np.int32).max)
# Shape 32bit
A32 = container((1, imax))
# indices big type, small values
B32 = A32.copy()
B32.indices = B32.indices.astype(np.int64)
B32.indptr = B32.indptr.astype(np.int64)
# Shape 64bit
# indices big type, small values
A64 = csr_array((1, imax + 1))
# indices small type, small values
B64 = A64.copy()
B64.indices = B64.indices.astype(np.int32)
B64.indptr = B64.indptr.astype(np.int32)
# indices big type, big values
C64 = A64.copy()
C64.indices = np.array([imax + 1], dtype=np.int64)
C64.indptr = np.array([0, 1], dtype=np.int64)
C64.data = np.array([2.2])
assert (A32.indices.dtype, A32.indptr.dtype) == (np.int32, np.int32)
assert (B32.indices.dtype, B32.indptr.dtype) == (np.int64, np.int64)
assert (A64.indices.dtype, A64.indptr.dtype) == (np.int64, np.int64)
assert (B64.indices.dtype, B64.indptr.dtype) == (np.int32, np.int32)
assert (C64.indices.dtype, C64.indptr.dtype) == (np.int64, np.int64)
for A in [A32, B32, A64, B64]:
indices, indptr = sputils.safely_cast_index_arrays(A, np.int32)
assert (indices.dtype, indptr.dtype) == (np.int32, np.int32)
indices, indptr = sputils.safely_cast_index_arrays(A, np.int64)
assert (indices.dtype, indptr.dtype) == (np.int64, np.int64)
indices, indptr = sputils.safely_cast_index_arrays(A, A.indices.dtype)
assert indices is A.indices
assert indptr is A.indptr
with assert_raises(ValueError):
sputils.safely_cast_index_arrays(C64, np.int32)
indices, indptr = sputils.safely_cast_index_arrays(C64, np.int64)
assert indices is C64.indices
assert indptr is C64.indptr
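        # In short: the cast is a no-op (the same arrays are returned) when the
        # dtype already matches, and it raises ValueError only when an index
        # value cannot be represented in the requested dtype.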

    def test_safely_cast_index_coo(self):
        # This is slow to test completely as nnz > imax is big
        # So we don't test large nnz
        imax = np.int64(np.iinfo(np.int32).max)

        # Shape 32bit
        A32 = coo_array((1, imax))
        # coords big type, small values
        B32 = A32.copy()
        B32.coords = tuple(co.astype(np.int64) for co in B32.coords)

        # Shape 64bit
        # coords big type, small values
        A64 = coo_array((1, imax + 1))
        # coords small type, small values
        B64 = A64.copy()
        B64.coords = tuple(co.astype(np.int32) for co in B64.coords)
        # coords big type, big values
        C64 = A64.copy()
        C64.coords = (np.array([imax + 1]), np.array([0]))
        C64.data = np.array([2.2])

        assert A32.coords[0].dtype == np.int32
        assert B32.coords[0].dtype == np.int64
        assert A64.coords[0].dtype == np.int64
        assert B64.coords[0].dtype == np.int32
        assert C64.coords[0].dtype == np.int64

        for A in [A32, B32, A64, B64]:
            coords = sputils.safely_cast_index_arrays(A, np.int32)
            assert coords[0].dtype == np.int32
            coords = sputils.safely_cast_index_arrays(A, np.int64)
            assert coords[0].dtype == np.int64
            coords = sputils.safely_cast_index_arrays(A, A.coords[0].dtype)
            assert coords[0] is A.coords[0]

        with assert_raises(ValueError):
            sputils.safely_cast_index_arrays(C64, np.int32)
        coords = sputils.safely_cast_index_arrays(C64, np.int64)
        assert coords[0] is C64.coords[0]

    def test_safely_cast_index_dia(self):
        # This is slow to test completely as nnz > imax is big
        # So we don't test large nnz
        imax = np.int64(np.iinfo(np.int32).max)

        # Shape 32bit
        A32 = dia_array((1, imax))
        # offsets big type, small values
        B32 = A32.copy()
        B32.offsets = B32.offsets.astype(np.int64)

        # Shape 64bit
        # offsets big type, small values
        A64 = dia_array((1, imax + 2))
        # offsets small type, small values
        B64 = A64.copy()
        B64.offsets = B64.offsets.astype(np.int32)
        # offsets big type, big values
        C64 = A64.copy()
        C64.offsets = np.array([imax + 1])
        C64.data = np.array([2.2])

        assert A32.offsets.dtype == np.int32
        assert B32.offsets.dtype == np.int64
        assert A64.offsets.dtype == np.int64
        assert B64.offsets.dtype == np.int32
        assert C64.offsets.dtype == np.int64

        for A in [A32, B32, A64, B64]:
            offsets = sputils.safely_cast_index_arrays(A, np.int32)
            assert offsets.dtype == np.int32
            offsets = sputils.safely_cast_index_arrays(A, np.int64)
            assert offsets.dtype == np.int64
            offsets = sputils.safely_cast_index_arrays(A, A.offsets.dtype)
            assert offsets is A.offsets

        with assert_raises(ValueError):
            sputils.safely_cast_index_arrays(C64, np.int32)
        offsets = sputils.safely_cast_index_arrays(C64, np.int64)
        assert offsets is C64.offsets

    def test_get_index_dtype(self):
        imax = np.int64(np.iinfo(np.int32).max)
        too_big = imax + 1

        # Check that uint32 arrays with no values too large do not get
        # promoted to int64
        a1 = np.ones(90, dtype='uint32')
        a2 = np.ones(90, dtype='uint32')
        assert_equal(
            np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)),
            np.dtype('int32')
        )

        # Check that if the dtype cannot be converted directly but all values
        # are <= imax, int32 is still used
        a1[-1] = imax
        assert_equal(
            np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)),
            np.dtype('int32')
        )

        # Check that if the dtype cannot be converted directly and the
        # contents are too large, int64 is returned
        a1[-1] = too_big
        assert_equal(
            np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)),
            np.dtype('int64')
        )

        # Check that if the dtype cannot be converted and check_contents is
        # not requested, int64 is returned
        a1 = np.ones(89, dtype='uint32')
        a2 = np.ones(89, dtype='uint32')
        assert_equal(
            np.dtype(sputils.get_index_dtype((a1, a2))),
            np.dtype('int64')
        )

        # Check that an explicitly supplied maxval takes precedence, even when
        # the array contents would otherwise allow int32
        a1 = np.ones(12, dtype='uint32')
        a2 = np.ones(12, dtype='uint32')
        assert_equal(
            np.dtype(sputils.get_index_dtype(
                (a1, a2), maxval=too_big, check_contents=True
            )),
            np.dtype('int64')
        )

        # Check that an array with too-large contents and maxval set
        # still returns int64
        a1[-1] = too_big
        assert_equal(
            np.dtype(sputils.get_index_dtype((a1, a2), maxval=too_big)),
            np.dtype('int64')
        )
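        # Summary of the cases above: a non-native index dtype such as uint32
        # yields int32 only when check_contents=True finds every value within
        # int32 range and no larger maxval is given; otherwise int64 is used.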

    # tests for the public broadcast_shapes, largely adapted from
    # numpy/numpy/lib/tests/test_stride_tricks.py
    # The first 3 cases make np.broadcast raise "index too large",
    # but not sputils.broadcast_shapes.
    @pytest.mark.parametrize("input_shapes,target_shape", [
        [((6, 5, 1, 4, 1, 1), (1, 2**32), (2**32, 1)), (6, 5, 1, 4, 2**32, 2**32)],
        [((6, 5, 1, 4, 1, 1), (1, 2**32)), (6, 5, 1, 4, 1, 2**32)],
        [((1, 2**32), (2**32, 1)), (2**32, 2**32)],
        [[2, 2, 2], (2,)],
        [[], ()],
        [[()], ()],
        [[(7,)], (7,)],
        [[(1, 2), (2,)], (1, 2)],
        [[(2,), (1, 2)], (1, 2)],
        [[(1, 1)], (1, 1)],
        [[(1, 1), (3, 4)], (3, 4)],
        [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
        [[(5, 6, 1)], (5, 6, 1)],
        [[(1, 3), (3, 1)], (3, 3)],
        [[(1, 0), (0, 0)], (0, 0)],
        [[(0, 1), (0, 0)], (0, 0)],
        [[(1, 0), (0, 1)], (0, 0)],
        [[(1, 1), (0, 0)], (0, 0)],
        [[(1, 1), (1, 0)], (1, 0)],
        [[(1, 1), (0, 1)], (0, 1)],
        [[(), (0,)], (0,)],
        [[(0,), (0, 0)], (0, 0)],
        [[(0,), (0, 1)], (0, 0)],
        [[(1,), (0, 0)], (0, 0)],
        [[(), (0, 0)], (0, 0)],
        [[(1, 1), (0,)], (1, 0)],
        [[(1,), (0, 1)], (0, 1)],
        [[(1,), (1, 0)], (1, 0)],
        [[(), (1, 0)], (1, 0)],
        [[(), (0, 1)], (0, 1)],
        [[(1,), (3,)], (3,)],
        [[2, (3, 2)], (3, 2)],
        [[(1, 2)] * 32, (1, 2)],
        [[(1, 2)] * 100, (1, 2)],
        [[(2,)] * 32, (2,)],
    ])
    def test_broadcast_shapes_successes(self, input_shapes, target_shape):
        assert_equal(sputils.broadcast_shapes(*input_shapes), target_shape)

    # tests public broadcast_shapes failures
    @pytest.mark.parametrize("input_shapes", [
        [(3,), (4,)],
        [(2, 3), (2,)],
        [2, (2, 3)],
        [(3,), (3,), (4,)],
        [(2, 5), (3, 5)],
        [(2, 4), (2, 5)],
        [(1, 3, 4), (2, 3, 3)],
        [(1, 2), (3, 1), (3, 2), (10, 5)],
        [(2,)] * 32 + [(3,)] * 32,
    ])
    def test_broadcast_shapes_failures(self, input_shapes):
        with assert_raises(ValueError, match="cannot be broadcast"):
            sputils.broadcast_shapes(*input_shapes)
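        # For example, broadcast_shapes((1, 3), (3, 1)) gives (3, 3), while
        # (3,) against (4,) has no common shape and raises ValueError.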

    def test_check_shape_overflow(self):
        new_shape = sputils.check_shape([(10, -1)], (65535, 131070))
        assert_equal(new_shape, (10, 858967245))

    def test_matrix(self):
        a = [[1, 2, 3]]
        b = np.array(a)

        assert isinstance(sputils.matrix(a), np.matrix)
        assert isinstance(sputils.matrix(b), np.matrix)

        c = sputils.matrix(b)
        c[:, :] = 123
        assert_equal(b, a)

        c = sputils.matrix(b, copy=False)
        c[:, :] = 123
        assert_equal(b, [[123, 123, 123]])

    def test_asmatrix(self):
        a = [[1, 2, 3]]
        b = np.array(a)

        assert isinstance(sputils.asmatrix(a), np.matrix)
        assert isinstance(sputils.asmatrix(b), np.matrix)

        c = sputils.asmatrix(b)
        c[:, :] = 123
        assert_equal(b, [[123, 123, 123]])
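

# Illustrative sketch (not part of the original file) of the copy semantics
# exercised by the last two tests: sputils.matrix copies its input by default,
# while asmatrix (and matrix(..., copy=False)) wraps the same buffer.
def test_matrix_copy_semantics_example():
    b = np.array([[1, 2, 3]])
    sputils.matrix(b)[:, :] = 0        # writes to a copy; b is unchanged
    assert_equal(b, [[1, 2, 3]])
    sputils.asmatrix(b)[:, :] = 0      # shares b's memory; b is modified
    assert_equal(b, [[0, 0, 0]])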