remove venv

This commit is contained in:
Tykayn 2025-08-30 18:57:59 +02:00 committed by tykayn
parent 056387013d
commit 0680c7594e
13999 changed files with 0 additions and 2895688 deletions

View file

@ -1,10 +0,0 @@
from pathlib import Path

# Verify that the baseline-image data shipped with the test suite is present;
# without it the image-comparison tests cannot run.
_baseline_dir = Path(__file__).parent / 'baseline_images'
if not _baseline_dir.exists():
    raise OSError(
        'The baseline image directory does not exist. '
        'This is most likely because the test data is not installed. '
        'You may need to install matplotlib from source to get the '
        'test data.')

View file

@ -1,2 +0,0 @@
from matplotlib.testing.conftest import ( # noqa
mpl_test_settings, pytest_configure, pytest_unconfigure, pd, text_placeholders, xr)

View file

@ -1,137 +0,0 @@
from io import BytesIO
import pytest
import logging
from matplotlib import _afm
from matplotlib import font_manager as fm
# See note in afm.py re: use of comma as decimal separator in the
# UnderlineThickness field and re: use of non-ASCII characters in the Notice
# field.
# Minimal AFM (Adobe Font Metrics) fixture: a header block followed by three
# character-metric entries; shared by all parser tests below.
AFM_TEST_DATA = b"""StartFontMetrics 2.0
Comment Comments are ignored.
Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017
FontName MyFont-Bold
EncodingScheme FontSpecific
FullName My Font Bold
FamilyName Test Fonts
Weight Bold
ItalicAngle 0.0
IsFixedPitch false
UnderlinePosition -100
UnderlineThickness 56,789
Version 001.000
Notice Copyright \xa9 2017 No one.
FontBBox 0 -321 1234 369
StartCharMetrics 3
C 0 ; WX 250 ; N space ; B 0 0 0 0 ;
C 42 ; WX 1141 ; N foo ; B 40 60 800 360 ;
C 99 ; WX 583 ; N bar ; B 40 -10 543 210 ;
EndCharMetrics
EndFontMetrics
"""
def test_nonascii_str():
    # _to_str must decode byte strings as UTF-8 so that font files
    # containing non-ASCII characters still load.
    original = "привет"
    decoded = _afm._to_str(original.encode("utf8"))
    assert decoded == original
def test_parse_header():
    # The header parser should decode every known field to its proper type.
    expected = {
        b'StartFontMetrics': 2.0,
        b'FontName': 'MyFont-Bold',
        b'EncodingScheme': 'FontSpecific',
        b'FullName': 'My Font Bold',
        b'FamilyName': 'Test Fonts',
        b'Weight': 'Bold',
        b'ItalicAngle': 0.0,
        b'IsFixedPitch': False,
        b'UnderlinePosition': -100,
        b'UnderlineThickness': 56.789,  # comma decimal separator handled
        b'Version': '001.000',
        b'Notice': b'Copyright \xa9 2017 No one.',
        b'FontBBox': [0, -321, 1234, 369],
        b'StartCharMetrics': 3,
    }
    assert _afm._parse_header(BytesIO(AFM_TEST_DATA)) == expected
def test_parse_char_metrics():
    # The char-metrics parser returns the same entries keyed both by
    # character code and by glyph name.
    stream = BytesIO(AFM_TEST_DATA)
    _afm._parse_header(stream)  # advance the stream past the header
    by_code, by_name = _afm._parse_char_metrics(stream)
    space = (250.0, 'space', [0, 0, 0, 0])
    foo = (1141.0, 'foo', [40, 60, 800, 360])
    bar = (583.0, 'bar', [40, -10, 543, 210])
    assert by_code == {0: space, 42: foo, 99: bar}
    assert by_name == {'space': space, 'foo': foo, 'bar': bar}
def test_get_familyname_guessed():
    # Without an explicit FamilyName entry, the family is guessed from the
    # full name ("My Font Bold" -> "My Font").
    font = _afm.AFM(BytesIO(AFM_TEST_DATA))
    del font._header[b'FamilyName']
    assert font.get_familyname() == 'My Font'
def test_font_manager_weight_normalization():
    # Unrecognized weight names must normalize to "normal".
    data = AFM_TEST_DATA.replace(b"Weight Bold\n", b"Weight Custom\n")
    font = _afm.AFM(BytesIO(data))
    assert fm.afmFontProperty("", font).weight == "normal"
@pytest.mark.parametrize(
    "afm_data",
    [
        b"""nope
really nope""",
        b"""StartFontMetrics 2.0
Comment Comments are ignored.
Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017
FontName MyFont-Bold
EncodingScheme FontSpecific""",
    ],
)
def test_bad_afm(afm_data):
    # Non-AFM input and headers truncated before StartCharMetrics must both
    # raise RuntimeError from the header parser.
    with pytest.raises(RuntimeError):
        _afm._parse_header(BytesIO(afm_data))
@pytest.mark.parametrize(
    "afm_data",
    [
        b"""StartFontMetrics 2.0
Comment Comments are ignored.
Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017
Aardvark bob
FontName MyFont-Bold
EncodingScheme FontSpecific
StartCharMetrics 3""",
        b"""StartFontMetrics 2.0
Comment Comments are ignored.
Comment Creation Date:Mon Nov 13 12:34:11 GMT 2017
ItalicAngle zero degrees
FontName MyFont-Bold
EncodingScheme FontSpecific
StartCharMetrics 3""",
    ],
)
def test_malformed_header(afm_data, caplog):
    # An unknown key or an unconvertible value produces exactly one ERROR
    # log record instead of raising.
    with caplog.at_level(logging.ERROR):
        _afm._parse_header(BytesIO(afm_data))
    assert len(caplog.records) == 1

View file

@ -1,340 +0,0 @@
import io
import numpy as np
from numpy.testing import assert_array_almost_equal
from PIL import features, Image, TiffTags
import pytest
from matplotlib import (
collections, patheffects, pyplot as plt, transforms as mtransforms,
rcParams, rc_context)
from matplotlib.backends.backend_agg import RendererAgg
from matplotlib.figure import Figure
from matplotlib.image import imread
from matplotlib.path import Path
from matplotlib.testing.decorators import image_comparison
from matplotlib.transforms import IdentityTransform
def test_repeated_save_with_alpha():
    # A figure with a semi-transparent bluish-green background must render
    # the same first pixel on every save; colors from an earlier renderer
    # must not bleed into a later one.
    fig = Figure([1, 0.4])
    fig.set_facecolor((0, 1, 0.4))
    fig.patch.set_alpha(0.25)
    buf = io.BytesIO()
    for _ in range(2):
        buf.seek(0)
        fig.savefig(buf,
                    facecolor=fig.get_facecolor(),
                    edgecolor='none')
    buf.seek(0)
    # Target is fig.patch.get_facecolor(), approx (0, 1.0, 0.4, 0.25).
    assert_array_almost_equal(tuple(imread(buf)[0, 0]),
                              (0.0, 1.0, 0.4, 0.250),
                              decimal=3)
def test_large_single_path_collection():
    # A single oversized path inside a PathCollection used to segfault when
    # the draw_markers optimization kicked in; merely rendering it is the
    # test.
    out = io.BytesIO()
    f, ax = plt.subplots()
    big_path = Path([[-10, 5], [10, 5], [10, -5], [-10, -5], [-10, 5]])
    ax.add_artist(collections.PathCollection([big_path]))
    ax.set_xlim(10**-3, 1)
    plt.savefig(out)
def test_marker_with_nan():
    # Data that produces NaNs under the log-scale transform used to segfault
    # the Agg backend (see #3722); rendering to PNG is the test.
    fig, ax = plt.subplots(1)
    data = np.arange(1000)
    ax.semilogx(data)
    ax.fill_between(data, data*0.8, data*1.2)
    out = io.BytesIO()
    fig.savefig(out, format='png')
def test_long_path():
    # Smoke test: render a 100k-vertex alternating-sign polyline.
    out = io.BytesIO()
    fig = Figure()
    ax = fig.subplots()
    vertices = np.ones(100_000)
    vertices[::2] = -1
    ax.plot(vertices)
    fig.savefig(out, format='png')
@image_comparison(['agg_filter.png'], remove_text=True)
def test_agg_filter():
    """Image test: two lines rendered with Gaussian drop-shadow agg filters."""

    def smooth1d(x, window_len):
        # copied from https://scipy-cookbook.readthedocs.io/
        # Hanning-window smoothing with mirrored edges to avoid boundary
        # artifacts.
        s = np.r_[
            2*x[0] - x[window_len:1:-1], x, 2*x[-1] - x[-1:-window_len:-1]]
        w = np.hanning(window_len)
        y = np.convolve(w/w.sum(), s, mode='same')
        return y[window_len-1:-window_len+1]

    def smooth2d(A, sigma=3):
        # Separable 2D smoothing: apply the 1D filter along each axis.
        window_len = max(int(sigma), 3) * 2 + 1
        A = np.apply_along_axis(smooth1d, 0, A, window_len)
        A = np.apply_along_axis(smooth1d, 1, A, window_len)
        return A

    class BaseFilter:
        # Agg filter protocol: called with the rendered RGBA buffer and the
        # dpi; returns (new_image, x_offset, y_offset).
        def get_pad(self, dpi):
            # Extra pixels to pad around the source before filtering.
            return 0

        def process_image(self, padded_src, dpi):
            raise NotImplementedError("Should be overridden by subclasses")

        def __call__(self, im, dpi):
            pad = self.get_pad(dpi)
            padded_src = np.pad(im, [(pad, pad), (pad, pad), (0, 0)],
                                "constant")
            tgt_image = self.process_image(padded_src, dpi)
            # Negative offsets compensate for the padding added above.
            return tgt_image, -pad, -pad

    class OffsetFilter(BaseFilter):
        def __init__(self, offsets=(0, 0)):
            # Offsets are given in points and converted to pixels at draw
            # time.
            self.offsets = offsets

        def get_pad(self, dpi):
            return int(max(self.offsets) / 72 * dpi)

        def process_image(self, padded_src, dpi):
            ox, oy = self.offsets
            a1 = np.roll(padded_src, int(ox / 72 * dpi), axis=1)
            a2 = np.roll(a1, -int(oy / 72 * dpi), axis=0)
            return a2

    class GaussianFilter(BaseFilter):
        """Simple Gaussian filter."""

        def __init__(self, sigma, alpha=0.5, color=(0, 0, 0)):
            self.sigma = sigma
            self.alpha = alpha
            self.color = color

        def get_pad(self, dpi):
            # 3 sigma of padding keeps the blur from clipping.
            return int(self.sigma*3 / 72 * dpi)

        def process_image(self, padded_src, dpi):
            # Recolor the whole image and blur only the alpha channel.
            tgt_image = np.empty_like(padded_src)
            tgt_image[:, :, :3] = self.color
            tgt_image[:, :, 3] = smooth2d(padded_src[:, :, 3] * self.alpha,
                                          self.sigma / 72 * dpi)
            return tgt_image

    class DropShadowFilter(BaseFilter):
        # Gaussian blur followed by an offset: a classic drop shadow.
        def __init__(self, sigma, alpha=0.3, color=(0, 0, 0), offsets=(0, 0)):
            self.gauss_filter = GaussianFilter(sigma, alpha, color)
            self.offset_filter = OffsetFilter(offsets)

        def get_pad(self, dpi):
            return max(self.gauss_filter.get_pad(dpi),
                       self.offset_filter.get_pad(dpi))

        def process_image(self, padded_src, dpi):
            t1 = self.gauss_filter.process_image(padded_src, dpi)
            t2 = self.offset_filter.process_image(t1, dpi)
            return t2

    fig, ax = plt.subplots()

    # draw lines
    # NOTE(review): both lines carry the label "Line 1" — presumably a
    # copy-paste slip; harmless here since remove_text=True and no legend is
    # drawn.
    line1, = ax.plot([0.1, 0.5, 0.9], [0.1, 0.9, 0.5], "bo-",
                     mec="b", mfc="w", lw=5, mew=3, ms=10, label="Line 1")
    line2, = ax.plot([0.1, 0.5, 0.9], [0.5, 0.2, 0.7], "ro-",
                     mec="r", mfc="w", lw=5, mew=3, ms=10, label="Line 1")

    gauss = DropShadowFilter(4)

    for line in [line1, line2]:
        # draw shadows with same lines with slight offset.
        xx = line.get_xdata()
        yy = line.get_ydata()
        shadow, = ax.plot(xx, yy)
        shadow.update_from(line)

        # offset transform
        transform = mtransforms.offset_copy(
            line.get_transform(), fig, x=4.0, y=-6.0, units='points')
        shadow.set_transform(transform)

        # adjust zorder of the shadow lines so that it is drawn below the
        # original lines
        shadow.set_zorder(line.get_zorder() - 0.5)
        shadow.set_agg_filter(gauss)
        shadow.set_rasterized(True)  # to support mixed-mode renderers

    ax.set_xlim(0., 1.)
    ax.set_ylim(0., 1.)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
def test_too_large_image():
    # A figure whose rendered height exceeds the Agg size limit must raise
    # ValueError instead of crashing.
    fig = plt.figure(figsize=(300, 2**25))
    with pytest.raises(ValueError):
        fig.savefig(io.BytesIO())
def test_chunksize():
    # Drawing must succeed both with the default (disabled) chunking and
    # with a small explicit agg.path.chunksize.
    xs = range(200)
    fig, ax = plt.subplots()
    ax.plot(xs, np.sin(xs))
    fig.canvas.draw()

    fig, ax = plt.subplots()
    rcParams['agg.path.chunksize'] = 105
    ax.plot(xs, np.sin(xs))
    fig.canvas.draw()
@pytest.mark.backend('Agg')
def test_jpeg_dpi():
    """Check that the dpi is stored correctly in saved jpg files."""
    plt.plot([0, 1, 2], [0, 1, 0])
    out = io.BytesIO()
    plt.savefig(out, format="jpg", dpi=200)
    assert Image.open(out).info['dpi'] == (200, 200)
def test_pil_kwargs_png():
    # pil_kwargs must be forwarded to PIL when writing PNG metadata.
    from PIL.PngImagePlugin import PngInfo
    metadata = PngInfo()
    metadata.add_text("Software", "test")
    out = io.BytesIO()
    plt.figure().savefig(out, format="png", pil_kwargs={"pnginfo": metadata})
    assert Image.open(out).info["Software"] == "test"
def test_pil_kwargs_tiff():
    # The TIFF "description" kwarg must land in the ImageDescription tag.
    out = io.BytesIO()
    plt.figure().savefig(out, format="tiff",
                         pil_kwargs={"description": "test image"})
    im = Image.open(out)
    tags = {TiffTags.TAGS_V2[code].name: value
            for code, value in im.tag_v2.items()}
    assert tags["ImageDescription"] == "test image"
@pytest.mark.skipif(not features.check("webp"), reason="WebP support not available")
def test_pil_kwargs_webp():
    # Higher WebP quality must yield a larger file, and savefig must not
    # mutate the caller's pil_kwargs dicts.
    plt.plot([0, 1, 2], [0, 1, 0])
    pil_kwargs_low = {"quality": 1}
    pil_kwargs_high = {"quality": 100}
    buf_small = io.BytesIO()
    plt.savefig(buf_small, format="webp", pil_kwargs=pil_kwargs_low)
    assert len(pil_kwargs_low) == 1
    buf_large = io.BytesIO()
    plt.savefig(buf_large, format="webp", pil_kwargs=pil_kwargs_high)
    assert len(pil_kwargs_high) == 1
    assert buf_large.getbuffer().nbytes > buf_small.getbuffer().nbytes
@pytest.mark.skipif(not features.check("webp"), reason="WebP support not available")
def test_webp_alpha():
    # Saving with transparent=True must produce an RGBA WebP image.
    plt.plot([0, 1, 2], [0, 1, 0])
    out = io.BytesIO()
    plt.savefig(out, format="webp", transparent=True)
    assert Image.open(out).mode == "RGBA"
def test_draw_path_collection_error_handling():
    # Handing set_paths a bare Path (instead of a list of paths) must
    # surface a TypeError at draw time rather than crashing the renderer.
    fig, ax = plt.subplots()
    scatter = ax.scatter([1], [1])
    scatter.set_paths(Path([(0, 1), (2, 3)]))
    with pytest.raises(TypeError):
        fig.canvas.draw()
def test_chunksize_fails():
    # NOTE: This test covers multiple independent test scenarios in a single
    # function, because each scenario uses ~2GB of memory and we don't
    # want parallel test executors to accidentally run multiple of these
    # at the same time.
    N = 100_000
    dpi = 500
    w = 5*dpi
    h = 6*dpi

    # make a Path that spans the whole w-h rectangle
    x = np.linspace(0, w, N)
    y = np.ones(N) * h
    y[::2] = 0
    path = Path(np.vstack((x, y)).T)
    # effectively disable path simplification (but leaving it "on")
    path.simplify_threshold = 0

    # setup the minimal GraphicsContext to draw a Path
    ra = RendererAgg(w, h, dpi)
    gc = ra.new_gc()
    gc.set_linewidth(1)
    gc.set_foreground('r')

    # Scenario 1: hatched paths cannot be split at all.
    gc.set_hatch('/')
    with pytest.raises(OverflowError, match='cannot split hatched path'):
        ra.draw_path(gc, path, IdentityTransform())
    gc.set_hatch(None)

    # Scenario 2: neither can filled paths.
    with pytest.raises(OverflowError, match='cannot split filled path'):
        ra.draw_path(gc, path, IdentityTransform(), (1, 0, 0))

    # Set to zero to disable, currently defaults to 0, but let's be sure.
    with rc_context({'agg.path.chunksize': 0}):
        with pytest.raises(OverflowError, match='Please set'):
            ra.draw_path(gc, path, IdentityTransform())

    # Set big enough that we do not try to chunk.
    with rc_context({'agg.path.chunksize': 1_000_000}):
        with pytest.raises(OverflowError, match='Please reduce'):
            ra.draw_path(gc, path, IdentityTransform())

    # Small enough we will try to chunk, but big enough we will fail to render.
    with rc_context({'agg.path.chunksize': 90_000}):
        with pytest.raises(OverflowError, match='Please reduce'):
            ra.draw_path(gc, path, IdentityTransform())

    # Final scenario: chunking refuses to run once simplification is off.
    path.should_simplify = False
    with pytest.raises(OverflowError, match="should_simplify is False"):
        ra.draw_path(gc, path, IdentityTransform())
def test_non_tuple_rgbaface():
    # 3D scatter passes rgbaFace to draw_path as an ndarray rather than a
    # tuple; drawing with a path effect must accept that.
    fig = plt.figure()
    ax = fig.add_subplot(projection="3d")
    ax.scatter([0, 1, 2], [0, 1, 2],
               path_effects=[patheffects.Stroke(linewidth=4)])
    fig.canvas.draw()

View file

@ -1,33 +0,0 @@
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
@image_comparison(baseline_images=['agg_filter_alpha'],
                  extensions=['png', 'pdf'])
def test_agg_filter_alpha():
    # Remove this line when this test image is regenerated.
    plt.rcParams['pcolormesh.snap'] = False

    ax = plt.axes()
    xx, yy = np.mgrid[0:7, 0:8]
    grid = xx**2 - yy**2
    mesh = ax.pcolormesh(grid, cmap='Reds', zorder=5)

    def manual_alpha(im, dpi):
        # Scale only the alpha channel of the rendered mesh.
        im[:, :, 3] *= 0.6
        print('CALLED')
        return im, 0, 0

    # Note: Doing alpha like this is not the same as setting alpha on
    # the mesh itself. Currently meshes are drawn as independent patches,
    # and we see fine borders around the blocks of color. See the SO
    # question for an example: https://stackoverflow.com/q/20678817/
    mesh.set_agg_filter(manual_alpha)

    # Rasterization is currently required for the filter to take effect in
    # the PDF backend.
    mesh.set_rasterized(True)
    ax.plot([0, 4, 7], [1, 3, 8])

View file

@ -1,571 +0,0 @@
import os
from pathlib import Path
import platform
import re
import shutil
import subprocess
import sys
import weakref
import numpy as np
import pytest
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import animation
from matplotlib.animation import PillowWriter
from matplotlib.testing.decorators import check_figures_equal
@pytest.fixture()
def anim(request):
    """Create a simple animation (with options)."""
    fig, ax = plt.subplots()
    line, = ax.plot([], [])
    ax.set_xlim(0, 10)
    ax.set_ylim(-1, 1)

    def init():
        line.set_data([], [])
        return line,

    def animate(i):
        x = np.linspace(0, 10, 100)
        line.set_data(x, np.sin(x + i))
        return line,

    # The fixture may be parametrized: 'klass' selects the object to build
    # (or `dict` to just hand back the kwargs), 'frames' defaults to 5.
    kwargs = dict(getattr(request, 'param', {}))  # copy, so pop is safe
    klass = kwargs.pop('klass', animation.FuncAnimation)
    kwargs.setdefault('frames', 5)
    return klass(fig=fig, func=animate, init_func=init, **kwargs)
class NullMovieWriter(animation.AbstractMovieWriter):
    """
    A minimal MovieWriter. It doesn't actually write anything.
    It just saves the arguments that were given to the setup() and
    grab_frame() methods as attributes, and counts how many times
    grab_frame() is called.
    This class doesn't have an __init__ method with the appropriate
    signature, and it doesn't define an isAvailable() method, so
    it cannot be added to the 'writers' registry.
    """

    def setup(self, fig, outfile, dpi, *args):
        # Record the arguments verbatim so tests can assert on them later.
        self.fig = fig
        self.outfile = outfile
        self.dpi = dpi
        self.args = args
        self._count = 0

    def grab_frame(self, **savefig_kwargs):
        # Validate kwargs like a real writer would, but keep them instead of
        # producing any output.
        from matplotlib.animation import _validate_grabframe_kwargs
        _validate_grabframe_kwargs(savefig_kwargs)
        self.savefig_kwargs = savefig_kwargs
        self._count += 1

    def finish(self):
        pass
def test_null_movie_writer(anim):
    # Test running an animation with NullMovieWriter: the writer must record
    # exactly the arguments save() hands to setup() and grab_frame().
    plt.rcParams["savefig.facecolor"] = "auto"
    filename = "unused.null"
    dpi = 50
    savefig_kwargs = dict(foo=0)
    writer = NullMovieWriter()
    anim.save(filename, dpi=dpi, writer=writer,
              savefig_kwargs=savefig_kwargs)
    assert writer.fig == plt.figure(1)  # The figure used by anim fixture
    assert writer.outfile == filename
    assert writer.dpi == dpi
    assert writer.args == ()
    # save() enriches the savefig kwargs (to composite transparent output
    # onto an opaque background), so only check ours are a subset.
    assert all(writer.savefig_kwargs[k] == v
               for k, v in savefig_kwargs.items())
    assert writer._count == anim._save_count
@pytest.mark.parametrize('anim', [dict(klass=dict)], indirect=['anim'])
def test_animation_delete(anim):
    if platform.python_implementation() == 'PyPy':
        # Something in the test setup fixture lingers around into the test and
        # breaks pytest.warns on PyPy. This garbage collection fixes it.
        # https://foss.heptapod.net/pypy/pypy/-/issues/3536
        np.testing.break_cycles()

    # An animation that is garbage-collected before ever running must warn.
    anim = animation.FuncAnimation(**anim)
    with pytest.warns(Warning, match='Animation was deleted'):
        del anim
        np.testing.break_cycles()
def test_movie_writer_dpi_default():
    # Test setting up movie writer with figure.dpi default.
    class DummyMovieWriter(animation.MovieWriter):
        def _run(self):
            pass

    fig = plt.figure()
    writer = DummyMovieWriter(5, "unused", 1, ["unused"])
    writer.setup(fig, "unused.null")
    # With no explicit dpi, setup() must fall back to the figure's dpi.
    assert writer.dpi == fig.dpi
@animation.writers.register('null')
class RegisteredNullMovieWriter(NullMovieWriter):
    # To be able to add NullMovieWriter to the 'writers' registry,
    # we must define an __init__ method with a specific signature,
    # and we must define the class method isAvailable().
    # (These methods are not actually required to use an instance
    # of this class as the 'writer' argument of Animation.save().)

    def __init__(self, fps=None, codec=None, bitrate=None,
                 extra_args=None, metadata=None):
        # Signature mirrors MovieWriter's; all arguments are ignored.
        pass

    @classmethod
    def isAvailable(cls):
        # Always available: nothing external to depend on.
        return True
# (writer-registry name, expected output filename) pairs exercised by the
# save/grab-frame smoke tests below.
WRITER_OUTPUT = [
    ('ffmpeg', 'movie.mp4'),
    ('ffmpeg_file', 'movie.mp4'),
    ('imagemagick', 'movie.gif'),
    ('imagemagick_file', 'movie.gif'),
    ('pillow', 'movie.gif'),
    ('html', 'movie.html'),
    ('null', 'movie.null')
]
def gen_writers():
    # Yield (writer, frame_format, output) parameter triples, with both str
    # and Path outputs; unavailable writers are emitted as skipped params so
    # they still appear in the test report.
    for writer, output in WRITER_OUTPUT:
        if not animation.writers.is_available(writer):
            skip = pytest.mark.skip(
                f"writer '{writer}' not available on this system")
            yield pytest.param(writer, None, output, marks=[skip])
            yield pytest.param(writer, None, Path(output), marks=[skip])
            continue

        writer_class = animation.writers[writer]
        for fmt in getattr(writer_class, 'supported_formats', [None]):
            yield writer, fmt, output
            yield writer, fmt, Path(output)
# Smoke test for saving animations. In the future, we should probably
# design more sophisticated tests which compare resulting frames a-la
# matplotlib.testing.image_comparison
@pytest.mark.parametrize('writer, frame_format, output', gen_writers())
@pytest.mark.parametrize('anim', [dict(klass=dict)], indirect=['anim'])
def test_save_animation_smoketest(tmpdir, writer, frame_format, output, anim):
    if frame_format is not None:
        plt.rcParams["animation.frame_format"] = frame_format
    anim = animation.FuncAnimation(**anim)
    dpi = codec = None
    if writer == 'ffmpeg':
        # Issue #8253
        anim._fig.set_size_inches((10.85, 9.21))
        dpi, codec = 100., 'h264'

    # File-based writers produce one file per frame with known names, so
    # run inside a temporary directory.
    with tmpdir.as_cwd():
        anim.save(output, fps=30, writer=writer, bitrate=500, dpi=dpi,
                  codec=codec)
    del anim
@pytest.mark.parametrize('writer, frame_format, output', gen_writers())
def test_grabframe(tmpdir, writer, frame_format, output):
    WriterClass = animation.writers[writer]
    if frame_format is not None:
        plt.rcParams["animation.frame_format"] = frame_format
    fig, ax = plt.subplots()
    dpi = codec = None
    if writer == 'ffmpeg':
        # Issue #8253
        fig.set_size_inches((10.85, 9.21))
        dpi, codec = 100., 'h264'

    test_writer = WriterClass()
    # File-based writers produce one file per frame, so work in a tmp dir.
    with tmpdir.as_cwd():
        with test_writer.saving(fig, output, dpi):
            test_writer.grab_frame()  # smoke test that it works
            # savefig-only kwargs must be rejected by grab_frame.
            for bad_kwarg in ('dpi', 'bbox_inches', 'format'):
                with pytest.raises(
                        TypeError,
                        match="grab_frame got an unexpected keyword argument "
                              f"{bad_kwarg!r}"):
                    test_writer.grab_frame(**{bad_kwarg: object()})
@pytest.mark.parametrize('writer', [
    pytest.param(
        'ffmpeg', marks=pytest.mark.skipif(
            not animation.FFMpegWriter.isAvailable(),
            reason='Requires FFMpeg')),
    pytest.param(
        'imagemagick', marks=pytest.mark.skipif(
            not animation.ImageMagickWriter.isAvailable(),
            reason='Requires ImageMagick')),
])
@pytest.mark.parametrize('html, want', [
    ('none', None),
    ('html5', '<video width'),
    ('jshtml', '<script ')
])
@pytest.mark.parametrize('anim', [dict(klass=dict)], indirect=['anim'])
def test_animation_repr_html(writer, html, want, anim):
    # _repr_html_ must honor the animation.html rcParam: None for 'none',
    # otherwise markup containing the expected fragment.
    if platform.python_implementation() == 'PyPy':
        # Something in the test setup fixture lingers around into the test and
        # breaks pytest.warns on PyPy. This garbage collection fixes it.
        # https://foss.heptapod.net/pypy/pypy/-/issues/3536
        np.testing.break_cycles()
    if (writer == 'imagemagick' and html == 'html5'
            # ImageMagick delegates to ffmpeg for this format.
            and not animation.FFMpegWriter.isAvailable()):
        pytest.skip('Requires FFMpeg')
    # create here rather than in the fixture otherwise we get __del__ warnings
    # about producing no output
    anim = animation.FuncAnimation(**anim)
    with plt.rc_context({'animation.writer': writer,
                         'animation.html': html}):
        # NOTE: rebinds the parameter name `html` to the generated markup.
        html = anim._repr_html_()
    if want is None:
        assert html is None
        with pytest.warns(UserWarning):
            del anim  # Animation was never run, so will warn on cleanup.
            np.testing.break_cycles()
    else:
        assert want in html
@pytest.mark.parametrize(
    'anim',
    [{'save_count': 10, 'frames': iter(range(5))}],
    indirect=['anim']
)
def test_no_length_frames(anim):
    # Saving must work when *frames* is a bare iterator with no __len__.
    anim.save('unused.null', writer=NullMovieWriter())
def test_movie_writer_registry():
    """Writer availability must track the configured ffmpeg binary path."""
    assert len(animation.writers._registered) > 0
    # A nonexistent binary makes the ffmpeg writer unavailable...
    mpl.rcParams['animation.ffmpeg_path'] = "not_available_ever_xxxx"
    assert not animation.writers.is_available("ffmpeg")
    # ...and pointing at something guaranteed to be on $PATH (and to exit
    # immediately) makes it available again.
    # (Local renamed from `bin` to avoid shadowing the builtin.)
    exe = "true" if sys.platform != 'win32' else "where"
    mpl.rcParams['animation.ffmpeg_path'] = exe
    assert animation.writers.is_available("ffmpeg")
@pytest.mark.parametrize(
    "method_name",
    [pytest.param("to_html5_video", marks=pytest.mark.skipif(
        not animation.writers.is_available(mpl.rcParams["animation.writer"]),
        reason="animation writer not installed")),
     "to_jshtml"])
@pytest.mark.parametrize('anim', [dict(frames=1)], indirect=['anim'])
def test_embed_limit(method_name, caplog, tmpdir, anim):
    # Exceeding animation.embed_limit must log exactly one WARNING from
    # matplotlib.animation instead of raising.
    caplog.set_level("WARNING")
    with tmpdir.as_cwd():
        with mpl.rc_context({"animation.embed_limit": 1e-6}):  # ~1 byte.
            getattr(anim, method_name)()
    record, = caplog.records  # implicitly checks there is exactly one
    assert record.name == "matplotlib.animation"
    assert record.levelname == "WARNING"
@pytest.mark.parametrize(
    "method_name",
    [pytest.param("to_html5_video", marks=pytest.mark.skipif(
        not animation.writers.is_available(mpl.rcParams["animation.writer"]),
        reason="animation writer not installed")),
     "to_jshtml"])
@pytest.mark.parametrize('anim', [dict(frames=1)], indirect=['anim'])
def test_cleanup_temporaries(method_name, tmpdir, anim):
    # Rendering must not leave temporary files behind in the working dir.
    with tmpdir.as_cwd():
        getattr(anim, method_name)()
        assert list(Path(str(tmpdir)).iterdir()) == []
@pytest.mark.skipif(shutil.which("/bin/sh") is None, reason="requires a POSIX OS")
def test_failing_ffmpeg(tmpdir, monkeypatch, anim):
    """
    Test that we correctly raise a CalledProcessError when ffmpeg fails.
    To do so, mock ffmpeg using a simple executable shell script that
    succeeds when called with no arguments (so that it gets registered by
    `isAvailable`), but fails otherwise, and add it to the $PATH.
    """
    with tmpdir.as_cwd():
        monkeypatch.setenv("PATH", ".:" + os.environ["PATH"])
        fake_ffmpeg = Path(str(tmpdir), "ffmpeg")
        fake_ffmpeg.write_bytes(b"#!/bin/sh\n[[ $@ -eq 0 ]]\n")
        os.chmod(fake_ffmpeg, 0o755)
        with pytest.raises(subprocess.CalledProcessError):
            anim.save("test.mpeg")
@pytest.mark.parametrize("cache_frame_data", [False, True])
def test_funcanimation_cache_frame_data(cache_frame_data):
    # The frame cache must keep frame objects alive iff caching is enabled;
    # verified through weak references after a garbage-collection pass.
    fig, ax = plt.subplots()
    line, = ax.plot([], [])

    class Frame(dict):
        # this subclassing enables to use weakref.ref()
        pass

    def init():
        line.set_data([], [])
        return line,

    def animate(frame):
        line.set_data(frame['x'], frame['y'])
        return line,

    frames_generated = []

    def frames_generator():
        for _ in range(5):
            x = np.linspace(0, 10, 100)
            y = np.random.rand(100)
            frame = Frame(x=x, y=y)
            # collect weak references to frames
            # to validate their references later
            frames_generated.append(weakref.ref(frame))
            yield frame

    MAX_FRAMES = 100
    anim = animation.FuncAnimation(fig, animate, init_func=init,
                                   frames=frames_generator,
                                   cache_frame_data=cache_frame_data,
                                   save_count=MAX_FRAMES)
    writer = NullMovieWriter()
    anim.save('unused.null', writer=writer)
    assert len(frames_generated) == 5
    # Force collection so dead weakrefs actually read as None below.
    np.testing.break_cycles()
    for f in frames_generated:
        # If cache_frame_data is True, then the weakref should be alive;
        # if cache_frame_data is False, then the weakref should be dead (None).
        assert (f() is None) != cache_frame_data
@pytest.mark.parametrize('return_value', [
    None,          # User forgot to return (returns None).
    'string',      # User returned a string.
    1,             # User returned an int.
    ('string', ),  # User returns a sequence of other objects, e.g., string instead of Artist.
    'artist',      # User forgot to return a sequence (handled in `animate` below.)
])
def test_draw_frame(return_value):
    # Blitting requires the update function to return a sequence of Artists;
    # anything else must make _draw_frame raise RuntimeError.
    fig, ax = plt.subplots()
    line, = ax.plot([])

    def animate(i):
        # general update func
        line.set_data([0, 1], [0, i])
        # 'artist' means: return a bare Artist, *not* a sequence of them.
        return line if return_value == 'artist' else return_value

    with pytest.raises(RuntimeError):
        animation.FuncAnimation(
            fig, animate, blit=True, cache_frame_data=False
        )
def test_exhausted_animation(tmpdir):
    # Restarting an animation whose frame iterator has been fully consumed
    # must emit an "exhausted" warning.
    fig, ax = plt.subplots()

    anim = animation.FuncAnimation(
        fig, lambda frame: [], frames=iter(range(10)), repeat=False,
        cache_frame_data=False
    )

    with tmpdir.as_cwd():
        anim.save("test.gif", writer='pillow')

    with pytest.warns(UserWarning, match="exhausted"):
        anim._start()
def test_no_frame_warning(tmpdir):
    # An animation constructed with an empty frame sequence warns on start.
    fig, ax = plt.subplots()

    anim = animation.FuncAnimation(
        fig, lambda frame: [], frames=[], repeat=False,
        cache_frame_data=False
    )

    with pytest.warns(UserWarning, match="exhausted"):
        anim._start()
@check_figures_equal(extensions=["png"])
def test_animation_frame(tmpdir, fig_test, fig_ref):
    # Run a 5-frame blitted animation (via save(), since no interactive
    # event loop is available here) and check the test figure ends up
    # showing the last frame's data.
    ax = fig_test.add_subplot()
    ax.set_xlim(0, 2 * np.pi)
    ax.set_ylim(-1, 1)
    x = np.linspace(0, 2 * np.pi, 100)
    line, = ax.plot([], [])

    def init():
        line.set_data([], [])
        return line,

    def animate(i):
        line.set_data(x, np.sin(x + i / 100))
        return line,

    anim = animation.FuncAnimation(
        fig_test, animate, init_func=init, frames=5,
        blit=True, repeat=False)
    with tmpdir.as_cwd():
        anim.save("test.gif")

    # Reference: draw the 5th frame's data directly, without animating.
    ax_ref = fig_ref.add_subplot()
    ax_ref.set_xlim(0, 2 * np.pi)
    ax_ref.set_ylim(-1, 1)
    ax_ref.plot(x, np.sin(x + 4 / 100))
@pytest.mark.parametrize('anim', [dict(klass=dict)], indirect=['anim'])
def test_save_count_override_warnings_has_length(anim):
    # When *frames* has a length, an explicit save_count is ignored (with a
    # warning) in favor of that length.
    save_count = 5
    frames = list(range(2))
    expected_msg = (
        f'You passed in an explicit {save_count=} '
        "which is being ignored in favor of "
        f"{len(frames)=}."
    )
    with pytest.warns(UserWarning, match=re.escape(expected_msg)):
        anim = animation.FuncAnimation(
            **{**anim, 'frames': frames, 'save_count': save_count}
        )
    assert anim._save_count == len(frames)
    anim._init_draw()
@pytest.mark.parametrize('anim', [dict(klass=dict)], indirect=['anim'])
def test_save_count_override_warnings_scaler(anim):
    # When *frames* is a plain integer, an explicit save_count is likewise
    # ignored (with a warning) in favor of that integer.
    save_count = 5
    frames = 7
    expected_msg = (
        f'You passed in an explicit {save_count=} '
        "which is being ignored in favor of "
        f"{frames=}."
    )
    with pytest.warns(UserWarning, match=re.escape(expected_msg)):
        anim = animation.FuncAnimation(
            **{**anim, 'frames': frames, 'save_count': save_count}
        )
    assert anim._save_count == frames
    anim._init_draw()
@pytest.mark.parametrize('anim', [dict(klass=dict)], indirect=['anim'])
def test_disable_cache_warning(anim):
    # A length-less frame iterator with caching enabled must warn and
    # silently disable the (potentially unbounded) cache.
    cache_frame_data = True
    frames = iter(range(5))
    expected_msg = (
        f"{frames=!r} which we can infer the length of, "
        "did not pass an explicit *save_count* "
        f"and passed {cache_frame_data=}. To avoid a possibly "
        "unbounded cache, frame data caching has been disabled. "
        "To suppress this warning either pass "
        "`cache_frame_data=False` or `save_count=MAX_FRAMES`."
    )
    with pytest.warns(UserWarning, match=re.escape(expected_msg)):
        anim = animation.FuncAnimation(
            **{**anim, 'cache_frame_data': cache_frame_data, 'frames': frames}
        )
    assert anim._cache_frame_data is False
    anim._init_draw()
def test_movie_writer_invalid_path(anim):
    # Saving into a nonexistent directory must raise FileNotFoundError,
    # with the platform-specific error-message shape.
    expected = (r"\[WinError 3] .*'\\\\foo\\\\bar\\\\aardvark'"
                if sys.platform == "win32"
                else r"\[Errno 2] .*'/foo")
    with pytest.raises(FileNotFoundError, match=expected):
        anim.save("/foo/bar/aardvark/thiscannotreallyexist.mp4",
                  writer=animation.FFMpegFileWriter())
def test_animation_with_transparency():
    """Test animation exhaustion with transparency using PillowWriter directly"""
    fig, ax = plt.subplots()
    ax.add_patch(plt.Rectangle((0, 0), 1, 1, color='red', alpha=0.5))
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)

    writer = PillowWriter(fps=30)
    writer.setup(fig, 'unused.gif', dpi=100)
    writer.grab_frame(transparent=True)
    latest_frame = writer._frames[-1]
    # A minimum alpha below 255 means the grabbed frame kept transparency.
    assert latest_frame.getextrema()[3][0] < 255
    plt.close(fig)

View file

@ -1,152 +0,0 @@
from __future__ import annotations
from collections.abc import Callable
import re
import typing
from typing import Any, TypeVar
import numpy as np
import pytest
import matplotlib as mpl
from matplotlib import _api
if typing.TYPE_CHECKING:
from typing_extensions import Self
T = TypeVar('T')
@pytest.mark.parametrize('target,shape_repr,test_shape',
                         [((None, ), "(N,)", (1, 3)),
                          ((None, 3), "(N, 3)", (1,)),
                          ((None, 3), "(N, 3)", (1, 2)),
                          ((1, 5), "(1, 5)", (1, 9)),
                          ((None, 2, None), "(M, 2, N)", (1, 3, 1))
                          ])
def test_check_shape(target: tuple[int | None, ...],
                     shape_repr: str,
                     test_shape: tuple[int, ...]) -> None:
    # A mismatched shape must raise ValueError whose message names the
    # argument and spells out both the expected and the actual shape.
    expected_msg = "^" + re.escape(
        f"'aardvark' must be {len(target)}D with shape {shape_repr}, but your input "
        f"has shape {test_shape}")
    with pytest.raises(ValueError, match=expected_msg):
        _api.check_shape(target, aardvark=np.zeros(test_shape))
def test_classproperty_deprecation() -> None:
    # A deprecated classproperty must warn on both class and instance access.
    class A:
        @_api.deprecated("0.0.0")
        @_api.classproperty
        def f(cls: Self) -> None:
            pass

    with pytest.warns(mpl.MatplotlibDeprecationWarning):
        A.f
    with pytest.warns(mpl.MatplotlibDeprecationWarning):
        A().f
def test_warn_deprecated():
    # Exercise every message variant _api.warn_deprecated can produce.
    # Basic form: name + introduced version + inferred removal version.
    with pytest.warns(mpl.MatplotlibDeprecationWarning,
                      match=r'foo was deprecated in Matplotlib 3\.10 and will be '
                            r'removed in 3\.12\.'):
        _api.warn_deprecated('3.10', name='foo')
    # obj_type is woven into the sentence ("The foo class ...").
    with pytest.warns(mpl.MatplotlibDeprecationWarning,
                      match=r'The foo class was deprecated in Matplotlib 3\.10 and '
                            r'will be removed in 3\.12\.'):
        _api.warn_deprecated('3.10', name='foo', obj_type='class')
    # alternative adds a "Use ... instead." sentence.
    with pytest.warns(mpl.MatplotlibDeprecationWarning,
                      match=r'foo was deprecated in Matplotlib 3\.10 and will be '
                            r'removed in 3\.12\. Use bar instead\.'):
        _api.warn_deprecated('3.10', name='foo', alternative='bar')
    # addendum is appended verbatim.
    with pytest.warns(mpl.MatplotlibDeprecationWarning,
                      match=r'foo was deprecated in Matplotlib 3\.10 and will be '
                            r'removed in 3\.12\. More information\.'):
        _api.warn_deprecated('3.10', name='foo', addendum='More information.')
    # An explicit removal version overrides the inferred one.
    with pytest.warns(mpl.MatplotlibDeprecationWarning,
                      match=r'foo was deprecated in Matplotlib 3\.10 and will be '
                            r'removed in 4\.0\.'):
        _api.warn_deprecated('3.10', name='foo', removal='4.0')
    # removal=False drops the removal sentence entirely.
    with pytest.warns(mpl.MatplotlibDeprecationWarning,
                      match=r'foo was deprecated in Matplotlib 3\.10\.'):
        _api.warn_deprecated('3.10', name='foo', removal=False)
    # pending=True switches to PendingDeprecationWarning...
    with pytest.warns(PendingDeprecationWarning,
                      match=r'foo will be deprecated in a future version'):
        _api.warn_deprecated('3.10', name='foo', pending=True)
    # ...and is incompatible with a scheduled removal.
    with pytest.raises(ValueError, match=r'cannot have a scheduled removal'):
        _api.warn_deprecated('3.10', name='foo', pending=True, removal='3.12')
    # An explicit message overrides all generated text.
    with pytest.warns(mpl.MatplotlibDeprecationWarning, match=r'Complete replacement'):
        _api.warn_deprecated('3.10', message='Complete replacement', name='foo',
                             alternative='bar', addendum='More information.',
                             obj_type='class', removal='4.0')
def test_deprecate_privatize_attribute() -> None:
    # Public aliases made by deprecate_privatize_attribute must proxy reads,
    # writes, and method calls through to the private names, warning on each
    # access.
    class C:
        def __init__(self) -> None: self._attr = 1
        def _meth(self, arg: T) -> T: return arg
        attr: int = _api.deprecate_privatize_attribute("0.0")
        meth: Callable = _api.deprecate_privatize_attribute("0.0")

    c = C()
    with pytest.warns(mpl.MatplotlibDeprecationWarning):
        assert c.attr == 1          # read proxies to _attr
    with pytest.warns(mpl.MatplotlibDeprecationWarning):
        c.attr = 2                  # write proxies to _attr
    with pytest.warns(mpl.MatplotlibDeprecationWarning):
        assert c.attr == 2
    with pytest.warns(mpl.MatplotlibDeprecationWarning):
        assert c.meth(42) == 42     # call proxies to _meth
def test_delete_parameter() -> None:
    # Passing a deleted parameter must warn, both on a plain signature and
    # on a **kwargs signature; omitting it must stay silent.
    @_api.delete_parameter("3.0", "foo")
    def func1(foo: Any = None) -> None:
        pass

    @_api.delete_parameter("3.0", "foo")
    def func2(**kwargs: Any) -> None:
        pass

    for func in [func1, func2]:  # type: ignore[list-item]
        func()  # No warning.
        with pytest.warns(mpl.MatplotlibDeprecationWarning):
            func(foo="bar")

    # A pyplot-style wrapper forwarding the sentinel default must not warn
    # unless the caller explicitly supplies the parameter.
    def pyplot_wrapper(foo: Any = _api.deprecation._deprecated_parameter) -> None:
        func1(foo)

    pyplot_wrapper()  # No warning.
    with pytest.warns(mpl.MatplotlibDeprecationWarning):
        # Fixed: exercise the wrapper itself; the original called the
        # leftover loop variable `func`, leaving this path untested.
        pyplot_wrapper(foo="bar")
def test_make_keyword_only() -> None:
    # Passing *arg* positionally must warn; passing it by keyword must not.
    @_api.make_keyword_only("3.0", "arg")
    def func(pre: Any, arg: Any, post: Any = None) -> None:
        pass

    func(1, arg=2)  # Check that no warning is emitted.

    with pytest.warns(mpl.MatplotlibDeprecationWarning):
        func(1, 2)
    with pytest.warns(mpl.MatplotlibDeprecationWarning):
        func(1, 2, 3)
def test_deprecation_alternative() -> None:
    # The *alternative* text must be propagated into the generated docstring.
    alternative = "`.f1`, `f2`, `f3(x) <.f3>` or `f4(x)<f4>`"

    @_api.deprecated("1", alternative=alternative)
    def f() -> None:
        pass

    if f.__doc__ is None:
        pytest.skip('Documentation is disabled')
    assert alternative in f.__doc__
def test_empty_check_in_list() -> None:
    # Calling check_in_list with no keyword arguments to validate is a
    # programming error and must raise TypeError.
    with pytest.raises(TypeError, match="No argument to check!"):
        _api.check_in_list(["a"])

Some files were not shown because too many files have changed in this diff Show more