follow-up: livre
This commit is contained in:
parent b4b4398bb0
commit 3a7a3849ae
12242 changed files with 2564461 additions and 6914 deletions
@@ -0,0 +1,8 @@
from networkx.utils.misc import *
from networkx.utils.decorators import *
from networkx.utils.random_sequence import *
from networkx.utils.union_find import *
from networkx.utils.rcm import *
from networkx.utils.heaps import *
from networkx.utils.configs import *
from networkx.utils.backends import *
10 binary files not shown.
2143	venv/lib/python3.13/site-packages/networkx/utils/backends.py	Normal file
File diff suppressed because it is too large
391	venv/lib/python3.13/site-packages/networkx/utils/configs.py	Normal file
@@ -0,0 +1,391 @@
import collections
|
||||
import typing
|
||||
from dataclasses import dataclass
|
||||
|
||||
__all__ = ["Config"]
|
||||
|
||||
|
||||
@dataclass(init=False, eq=False, slots=True, kw_only=True, match_args=False)
|
||||
class Config:
|
||||
"""The base class for NetworkX configuration.
|
||||
|
||||
There are two ways to use this to create configurations. The recommended way
|
||||
is to subclass ``Config`` with docs and annotations.
|
||||
|
||||
>>> class MyConfig(Config):
|
||||
... '''Breakfast!'''
|
||||
...
|
||||
... eggs: int
|
||||
... spam: int
|
||||
...
|
||||
... def _on_setattr(self, key, value):
|
||||
... assert isinstance(value, int) and value >= 0
|
||||
... return value
|
||||
>>> cfg = MyConfig(eggs=1, spam=5)
|
||||
|
||||
Another way is to simply pass the initial configuration as keyword arguments to
|
||||
the ``Config`` instance:
|
||||
|
||||
>>> cfg1 = Config(eggs=1, spam=5)
|
||||
>>> cfg1
|
||||
Config(eggs=1, spam=5)
|
||||
|
||||
Once defined, config items may be modified, but can't be added or deleted by default.
|
||||
``Config`` is a ``Mapping``, and can get and set configs via attributes or brackets:
|
||||
|
||||
>>> cfg.eggs = 2
|
||||
>>> cfg.eggs
|
||||
2
|
||||
>>> cfg["spam"] = 42
|
||||
>>> cfg["spam"]
|
||||
42
|
||||
|
||||
For convenience, configs can also be set temporarily within a context using the ``with`` statement:
|
||||
|
||||
>>> with cfg(spam=3):
|
||||
... print("spam (in context):", cfg.spam)
|
||||
spam (in context): 3
|
||||
>>> print("spam (after context):", cfg.spam)
|
||||
spam (after context): 42
|
||||
|
||||
Subclasses may also define ``_on_setattr`` (as done in the example above)
|
||||
to ensure the value being assigned is valid:
|
||||
|
||||
>>> cfg.spam = -1
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
AssertionError
|
||||
|
||||
If a more flexible configuration object is needed that allows adding and deleting
|
||||
configurations, then pass ``strict=False`` when defining the subclass:
|
||||
|
||||
>>> class FlexibleConfig(Config, strict=False):
|
||||
... default_greeting: str = "Hello"
|
||||
>>> flexcfg = FlexibleConfig()
|
||||
>>> flexcfg.name = "Mr. Anderson"
|
||||
>>> flexcfg
|
||||
FlexibleConfig(default_greeting='Hello', name='Mr. Anderson')
|
||||
"""
|
||||
|
||||
def __init_subclass__(cls, strict=True):
|
||||
cls._strict = strict
|
||||
|
||||
def __new__(cls, **kwargs):
|
||||
orig_class = cls
|
||||
if cls is Config:
|
||||
# Enable the "simple" case of accepting config definition as keywords
|
||||
cls = type(
|
||||
cls.__name__,
|
||||
(cls,),
|
||||
{"__annotations__": dict.fromkeys(kwargs, typing.Any)},
|
||||
)
|
||||
cls = dataclass(
|
||||
eq=False,
|
||||
repr=cls._strict,
|
||||
slots=cls._strict,
|
||||
kw_only=True,
|
||||
match_args=False,
|
||||
)(cls)
|
||||
if not cls._strict:
|
||||
cls.__repr__ = _flexible_repr
|
||||
cls._orig_class = orig_class # Save original class so we can pickle
|
||||
cls._prev = None # Stage previous configs to enable use as context manager
|
||||
cls._context_stack = [] # Stack of previous configs when used as context
|
||||
instance = object.__new__(cls)
|
||||
instance.__init__(**kwargs)
|
||||
return instance
|
||||
|
||||
def _on_setattr(self, key, value):
|
||||
"""Process config value and check whether it is valid. Useful for subclasses."""
|
||||
return value
|
||||
|
||||
def _on_delattr(self, key):
|
||||
"""Callback for when a config item is being deleted. Useful for subclasses."""
|
||||
|
||||
# Control behavior of attributes
|
||||
def __dir__(self):
|
||||
return self.__dataclass_fields__.keys()
|
||||
|
||||
def __setattr__(self, key, value):
|
||||
if self._strict and key not in self.__dataclass_fields__:
|
||||
raise AttributeError(f"Invalid config name: {key!r}")
|
||||
value = self._on_setattr(key, value)
|
||||
object.__setattr__(self, key, value)
|
||||
self.__class__._prev = None
|
||||
|
||||
def __delattr__(self, key):
|
||||
if self._strict:
|
||||
raise TypeError(
|
||||
f"Configuration items can't be deleted (can't delete {key!r})."
|
||||
)
|
||||
self._on_delattr(key)
|
||||
object.__delattr__(self, key)
|
||||
self.__class__._prev = None
|
||||
|
||||
# Be a `collection.abc.Collection`
|
||||
def __contains__(self, key):
|
||||
return (
|
||||
key in self.__dataclass_fields__ if self._strict else key in self.__dict__
|
||||
)
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.__dataclass_fields__ if self._strict else self.__dict__)
|
||||
|
||||
def __len__(self):
|
||||
return len(self.__dataclass_fields__ if self._strict else self.__dict__)
|
||||
|
||||
def __reversed__(self):
|
||||
return reversed(self.__dataclass_fields__ if self._strict else self.__dict__)
|
||||
|
||||
# Add dunder methods for `collections.abc.Mapping`
|
||||
def __getitem__(self, key):
|
||||
try:
|
||||
return getattr(self, key)
|
||||
except AttributeError as err:
|
||||
raise KeyError(*err.args) from None
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
try:
|
||||
self.__setattr__(key, value)
|
||||
except AttributeError as err:
|
||||
raise KeyError(*err.args) from None
|
||||
|
||||
def __delitem__(self, key):
|
||||
try:
|
||||
self.__delattr__(key)
|
||||
except AttributeError as err:
|
||||
raise KeyError(*err.args) from None
|
||||
|
||||
_ipython_key_completions_ = __dir__ # config["<TAB>
|
||||
|
||||
# Go ahead and make it a `collections.abc.Mapping`
|
||||
def get(self, key, default=None):
|
||||
return getattr(self, key, default)
|
||||
|
||||
def items(self):
|
||||
return collections.abc.ItemsView(self)
|
||||
|
||||
def keys(self):
|
||||
return collections.abc.KeysView(self)
|
||||
|
||||
def values(self):
|
||||
return collections.abc.ValuesView(self)
|
||||
|
||||
# dataclass can define __eq__ for us, but do it here so it works after pickling
|
||||
def __eq__(self, other):
|
||||
if not isinstance(other, Config):
|
||||
return NotImplemented
|
||||
return self._orig_class == other._orig_class and self.items() == other.items()
|
||||
|
||||
# Make pickle work
|
||||
def __reduce__(self):
|
||||
return self._deserialize, (self._orig_class, dict(self))
|
||||
|
||||
@staticmethod
|
||||
def _deserialize(cls, kwargs):
|
||||
return cls(**kwargs)
|
||||
|
||||
# Allow to be used as context manager
|
||||
def __call__(self, **kwargs):
|
||||
kwargs = {key: self._on_setattr(key, val) for key, val in kwargs.items()}
|
||||
prev = dict(self)
|
||||
for key, val in kwargs.items():
|
||||
setattr(self, key, val)
|
||||
self.__class__._prev = prev
|
||||
return self
|
||||
|
||||
def __enter__(self):
|
||||
if self.__class__._prev is None:
|
||||
raise RuntimeError(
|
||||
"Config being used as a context manager without config items being set. "
|
||||
"Set config items via keyword arguments when calling the config object. "
|
||||
"For example, using config as a context manager should be like:\n\n"
|
||||
' >>> with cfg(breakfast="spam"):\n'
|
||||
" ... ... # Do stuff\n"
|
||||
)
|
||||
self.__class__._context_stack.append(self.__class__._prev)
|
||||
self.__class__._prev = None
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
prev = self.__class__._context_stack.pop()
|
||||
for key, val in prev.items():
|
||||
setattr(self, key, val)
|
||||
|
||||
|
||||
def _flexible_repr(self):
|
||||
return (
|
||||
f"{self.__class__.__qualname__}("
|
||||
+ ", ".join(f"{key}={val!r}" for key, val in self.__dict__.items())
|
||||
+ ")"
|
||||
)
|
||||
|
||||
|
||||
# Register, b/c `Mapping.__subclasshook__` returns `NotImplemented`
|
||||
collections.abc.Mapping.register(Config)
|
||||
|
||||
|
||||
class BackendPriorities(Config, strict=False):
|
||||
"""Configuration to control automatic conversion to and calling of backends.
|
||||
|
||||
Priority is given to backends listed earlier.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
algos : list of backend names
|
||||
This controls "algorithms" such as ``nx.pagerank`` that don't return a graph.
|
||||
generators : list of backend names
|
||||
This controls "generators" such as ``nx.from_pandas_edgelist`` that return a graph.
|
||||
kwargs : variadic keyword arguments of function name to list of backend names
|
||||
This allows each function to be configured separately and will override the config
|
||||
in ``algos`` or ``generators`` if present. The dispatchable function name may be
|
||||
obtained from the ``.name`` attribute such as ``nx.pagerank.name`` (it's typically
|
||||
the same as the name of the function).
|
||||
"""
|
||||
|
||||
algos: list[str]
|
||||
generators: list[str]
|
||||
|
||||
def _on_setattr(self, key, value):
|
||||
from .backends import _registered_algorithms, backend_info
|
||||
|
||||
if key in {"algos", "generators"}:
|
||||
pass
|
||||
elif key not in _registered_algorithms:
|
||||
raise AttributeError(
|
||||
f"Invalid config name: {key!r}. Expected 'algos', 'generators', or a name "
|
||||
"of a dispatchable function (e.g. `.name` attribute of the function)."
|
||||
)
|
||||
if not (isinstance(value, list) and all(isinstance(x, str) for x in value)):
|
||||
raise TypeError(
|
||||
f"{key!r} config must be a list of backend names; got {value!r}"
|
||||
)
|
||||
if missing := {x for x in value if x not in backend_info}:
|
||||
missing = ", ".join(map(repr, sorted(missing)))
|
||||
raise ValueError(f"Unknown backend when setting {key!r}: {missing}")
|
||||
return value
|
||||
|
||||
def _on_delattr(self, key):
|
||||
if key in {"algos", "generators"}:
|
||||
raise TypeError(f"{key!r} configuration item can't be deleted.")
|
||||
|
||||
|
||||
class NetworkXConfig(Config):
|
||||
"""Configuration for NetworkX that controls behaviors such as how to use backends.
|
||||
|
||||
Attribute and bracket notation are supported for getting and setting configurations::
|
||||
|
||||
>>> nx.config.backend_priority == nx.config["backend_priority"]
|
||||
True
|
||||
|
||||
Parameters
|
||||
----------
|
||||
backend_priority : list of backend names or dict or BackendPriorities
|
||||
Enable automatic conversion of graphs to backend graphs for functions
|
||||
implemented by the backend. Priority is given to backends listed earlier.
|
||||
This is a nested configuration with keys ``algos``, ``generators``, and,
|
||||
optionally, function names. Setting this value to a list of backend names
|
||||
will set ``nx.config.backend_priority.algos``. For more information, see
|
||||
``help(nx.config.backend_priority)``. Default is empty list.
|
||||
|
||||
backends : Config mapping of backend names to backend Config
|
||||
The keys of the Config mapping are names of all installed NetworkX backends,
|
||||
and the values are their configurations as Config mappings.
|
||||
|
||||
cache_converted_graphs : bool
|
||||
If True, then save converted graphs to the cache of the input graph. Graph
|
||||
conversion may occur when automatically using a backend from `backend_priority`
|
||||
or when using the `backend=` keyword argument to a function call. Caching can
|
||||
improve performance by avoiding repeated conversions, but it uses more memory.
|
||||
Care should be taken to not manually mutate a graph that has cached graphs; for
|
||||
example, ``G[u][v][k] = val`` changes the graph, but does not clear the cache.
|
||||
Using methods such as ``G.add_edge(u, v, weight=val)`` will clear the cache to
|
||||
keep it consistent. ``G.__networkx_cache__.clear()`` manually clears the cache.
|
||||
Default is True.
|
||||
|
||||
fallback_to_nx : bool
|
||||
If True, then "fall back" and run with the default "networkx" implementation
|
||||
for dispatchable functions not implemented by backends of input graphs. When a
|
||||
backend graph is passed to a dispatchable function, the default behavior is to
|
||||
use the implementation from that backend if possible and raise if not. Enabling
|
||||
``fallback_to_nx`` makes the networkx implementation the fallback to use instead
|
||||
of raising, and will convert the backend graph to a networkx-compatible graph.
|
||||
Default is False.
|
||||
|
||||
warnings_to_ignore : set of strings
|
||||
Control which warnings from NetworkX are not emitted. Valid elements:
|
||||
|
||||
- `"cache"`: when a cached value is used from ``G.__networkx_cache__``.
|
||||
|
||||
Notes
|
||||
-----
|
||||
Environment variables may be used to control some default configurations:
|
||||
|
||||
- ``NETWORKX_BACKEND_PRIORITY``: set ``backend_priority.algos`` from comma-separated names.
|
||||
- ``NETWORKX_CACHE_CONVERTED_GRAPHS``: set ``cache_converted_graphs`` to True if nonempty.
|
||||
- ``NETWORKX_FALLBACK_TO_NX``: set ``fallback_to_nx`` to True if nonempty.
|
||||
- ``NETWORKX_WARNINGS_TO_IGNORE``: set `warnings_to_ignore` from comma-separated names.
|
||||
|
||||
and can be used for finer control of ``backend_priority`` such as:
|
||||
|
||||
- ``NETWORKX_BACKEND_PRIORITY_ALGOS``: same as ``NETWORKX_BACKEND_PRIORITY``
|
||||
to set ``backend_priority.algos``.
|
||||
|
||||
This is a global configuration. Use with caution when using from multiple threads.
|
||||
"""
|
||||
|
||||
backend_priority: BackendPriorities
|
||||
backends: Config
|
||||
cache_converted_graphs: bool
|
||||
fallback_to_nx: bool
|
||||
warnings_to_ignore: set[str]
|
||||
|
||||
def _on_setattr(self, key, value):
|
||||
from .backends import backend_info
|
||||
|
||||
if key == "backend_priority":
|
||||
if isinstance(value, list):
|
||||
# `config.backend_priority = [backend]` sets `backend_priority.algos`
|
||||
value = BackendPriorities(
|
||||
**dict(
|
||||
self.backend_priority,
|
||||
algos=self.backend_priority._on_setattr("algos", value),
|
||||
)
|
||||
)
|
||||
elif isinstance(value, dict):
|
||||
kwargs = value
|
||||
value = BackendPriorities(algos=[], generators=[])
|
||||
for key, val in kwargs.items():
|
||||
setattr(value, key, val)
|
||||
elif not isinstance(value, BackendPriorities):
|
||||
raise TypeError(
|
||||
f"{key!r} config must be a dict of lists of backend names; got {value!r}"
|
||||
)
|
||||
elif key == "backends":
|
||||
if not (
|
||||
isinstance(value, Config)
|
||||
and all(isinstance(key, str) for key in value)
|
||||
and all(isinstance(val, Config) for val in value.values())
|
||||
):
|
||||
raise TypeError(
|
||||
f"{key!r} config must be a Config of backend configs; got {value!r}"
|
||||
)
|
||||
if missing := {x for x in value if x not in backend_info}:
|
||||
missing = ", ".join(map(repr, sorted(missing)))
|
||||
raise ValueError(f"Unknown backend when setting {key!r}: {missing}")
|
||||
elif key in {"cache_converted_graphs", "fallback_to_nx"}:
|
||||
if not isinstance(value, bool):
|
||||
raise TypeError(f"{key!r} config must be True or False; got {value!r}")
|
||||
elif key == "warnings_to_ignore":
|
||||
if not (isinstance(value, set) and all(isinstance(x, str) for x in value)):
|
||||
raise TypeError(
|
||||
f"{key!r} config must be a set of warning names; got {value!r}"
|
||||
)
|
||||
known_warnings = {"cache"}
|
||||
if missing := {x for x in value if x not in known_warnings}:
|
||||
missing = ", ".join(map(repr, sorted(missing)))
|
||||
raise ValueError(
|
||||
f"Unknown warning when setting {key!r}: {missing}. Valid entries: "
|
||||
+ ", ".join(sorted(known_warnings))
|
||||
)
|
||||
return value
|
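Aside: a minimal usage sketch assembled from the Config docstring examples above (MyConfig and its fields are illustrative, not part of the library). It shows how a strict subclass validates assignments in _on_setattr and how the context-manager protocol restores previous values:

    from networkx.utils.configs import Config

    class MyConfig(Config):
        """Breakfast!"""
        eggs: int
        spam: int

        def _on_setattr(self, key, value):
            # validate: only non-negative ints are accepted
            assert isinstance(value, int) and value >= 0
            return value

    cfg = MyConfig(eggs=1, spam=5)
    cfg["spam"] = 42          # Mapping-style access works alongside attributes
    with cfg(spam=3):         # temporarily override inside the block
        assert cfg.spam == 3
    assert cfg.spam == 42     # previous value restored on __exit__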
1233	venv/lib/python3.13/site-packages/networkx/utils/decorators.py	Normal file
File diff suppressed because it is too large
338	venv/lib/python3.13/site-packages/networkx/utils/heaps.py	Normal file
@@ -0,0 +1,338 @@
"""
|
||||
Min-heaps.
|
||||
"""
|
||||
|
||||
from heapq import heappop, heappush
|
||||
from itertools import count
|
||||
|
||||
import networkx as nx
|
||||
|
||||
__all__ = ["MinHeap", "PairingHeap", "BinaryHeap"]
|
||||
|
||||
|
||||
class MinHeap:
|
||||
"""Base class for min-heaps.
|
||||
|
||||
A MinHeap stores a collection of key-value pairs ordered by their values.
|
||||
It supports querying the minimum pair, inserting a new pair, decreasing the
|
||||
value in an existing pair and deleting the minimum pair.
|
||||
"""
|
||||
|
||||
class _Item:
|
||||
"""Used by subclassess to represent a key-value pair."""
|
||||
|
||||
__slots__ = ("key", "value")
|
||||
|
||||
def __init__(self, key, value):
|
||||
self.key = key
|
||||
self.value = value
|
||||
|
||||
def __repr__(self):
|
||||
return repr((self.key, self.value))
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize a new min-heap."""
|
||||
self._dict = {}
|
||||
|
||||
def min(self):
|
||||
"""Query the minimum key-value pair.
|
||||
|
||||
Returns
|
||||
-------
|
||||
key, value : tuple
|
||||
The key-value pair with the minimum value in the heap.
|
||||
|
||||
Raises
|
||||
------
|
||||
NetworkXError
|
||||
If the heap is empty.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def pop(self):
|
||||
"""Delete the minimum pair in the heap.
|
||||
|
||||
Returns
|
||||
-------
|
||||
key, value : tuple
|
||||
The key-value pair with the minimum value in the heap.
|
||||
|
||||
Raises
|
||||
------
|
||||
NetworkXError
|
||||
If the heap is empty.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def get(self, key, default=None):
|
||||
"""Returns the value associated with a key.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
key : hashable object
|
||||
The key to be looked up.
|
||||
|
||||
default : object
|
||||
Default value to return if the key is not present in the heap.
|
||||
Default value: None.
|
||||
|
||||
Returns
|
||||
-------
|
||||
value : object.
|
||||
The value associated with the key.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def insert(self, key, value, allow_increase=False):
|
||||
"""Insert a new key-value pair or modify the value in an existing
|
||||
pair.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
key : hashable object
|
||||
The key.
|
||||
|
||||
value : object comparable with existing values.
|
||||
The value.
|
||||
|
||||
allow_increase : bool
|
||||
Whether the value is allowed to increase. If False, attempts to
|
||||
increase an existing value have no effect. Default value: False.
|
||||
|
||||
Returns
|
||||
-------
|
||||
decreased : bool
|
||||
True if a pair is inserted or the existing value is decreased.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def __nonzero__(self):
|
||||
"""Returns whether the heap if empty."""
|
||||
return bool(self._dict)
|
||||
|
||||
def __bool__(self):
|
||||
"""Returns whether the heap if empty."""
|
||||
return bool(self._dict)
|
||||
|
||||
def __len__(self):
|
||||
"""Returns the number of key-value pairs in the heap."""
|
||||
return len(self._dict)
|
||||
|
||||
def __contains__(self, key):
|
||||
"""Returns whether a key exists in the heap.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
key : any hashable object.
|
||||
The key to be looked up.
|
||||
"""
|
||||
return key in self._dict
|
||||
|
||||
|
||||
class PairingHeap(MinHeap):
|
||||
"""A pairing heap."""
|
||||
|
||||
class _Node(MinHeap._Item):
|
||||
"""A node in a pairing heap.
|
||||
|
||||
A tree in a pairing heap is stored using the left-child, right-sibling
|
||||
representation.
|
||||
"""
|
||||
|
||||
__slots__ = ("left", "next", "prev", "parent")
|
||||
|
||||
def __init__(self, key, value):
|
||||
super().__init__(key, value)
|
||||
# The leftmost child.
|
||||
self.left = None
|
||||
# The next sibling.
|
||||
self.next = None
|
||||
# The previous sibling.
|
||||
self.prev = None
|
||||
# The parent.
|
||||
self.parent = None
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize a pairing heap."""
|
||||
super().__init__()
|
||||
self._root = None
|
||||
|
||||
def min(self):
|
||||
if self._root is None:
|
||||
raise nx.NetworkXError("heap is empty.")
|
||||
return (self._root.key, self._root.value)
|
||||
|
||||
def pop(self):
|
||||
if self._root is None:
|
||||
raise nx.NetworkXError("heap is empty.")
|
||||
min_node = self._root
|
||||
self._root = self._merge_children(self._root)
|
||||
del self._dict[min_node.key]
|
||||
return (min_node.key, min_node.value)
|
||||
|
||||
def get(self, key, default=None):
|
||||
node = self._dict.get(key)
|
||||
return node.value if node is not None else default
|
||||
|
||||
def insert(self, key, value, allow_increase=False):
|
||||
node = self._dict.get(key)
|
||||
root = self._root
|
||||
if node is not None:
|
||||
if value < node.value:
|
||||
node.value = value
|
||||
if node is not root and value < node.parent.value:
|
||||
self._cut(node)
|
||||
self._root = self._link(root, node)
|
||||
return True
|
||||
elif allow_increase and value > node.value:
|
||||
node.value = value
|
||||
child = self._merge_children(node)
|
||||
# Nonstandard step: Link the merged subtree with the root. See
|
||||
# below for the standard step.
|
||||
if child is not None:
|
||||
self._root = self._link(self._root, child)
|
||||
# Standard step: Perform a decrease followed by a pop as if the
|
||||
# value were the smallest in the heap. Then insert the new
|
||||
# value into the heap.
|
||||
# if node is not root:
|
||||
# self._cut(node)
|
||||
# if child is not None:
|
||||
# root = self._link(root, child)
|
||||
# self._root = self._link(root, node)
|
||||
# else:
|
||||
# self._root = (self._link(node, child)
|
||||
# if child is not None else node)
|
||||
return False
|
||||
else:
|
||||
# Insert a new key.
|
||||
node = self._Node(key, value)
|
||||
self._dict[key] = node
|
||||
self._root = self._link(root, node) if root is not None else node
|
||||
return True
|
||||
|
||||
def _link(self, root, other):
|
||||
"""Link two nodes, making the one with the smaller value the parent of
|
||||
the other.
|
||||
"""
|
||||
if other.value < root.value:
|
||||
root, other = other, root
|
||||
next = root.left
|
||||
other.next = next
|
||||
if next is not None:
|
||||
next.prev = other
|
||||
other.prev = None
|
||||
root.left = other
|
||||
other.parent = root
|
||||
return root
|
||||
|
||||
def _merge_children(self, root):
|
||||
"""Merge the subtrees of the root using the standard two-pass method.
|
||||
The resulting subtree is detached from the root.
|
||||
"""
|
||||
node = root.left
|
||||
root.left = None
|
||||
if node is not None:
|
||||
link = self._link
|
||||
# Pass 1: Merge pairs of consecutive subtrees from left to right.
|
||||
# At the end of the pass, only the prev pointers of the resulting
|
||||
# subtrees have meaningful values. The other pointers will be fixed
|
||||
# in pass 2.
|
||||
prev = None
|
||||
while True:
|
||||
next = node.next
|
||||
if next is None:
|
||||
node.prev = prev
|
||||
break
|
||||
next_next = next.next
|
||||
node = link(node, next)
|
||||
node.prev = prev
|
||||
prev = node
|
||||
if next_next is None:
|
||||
break
|
||||
node = next_next
|
||||
# Pass 2: Successively merge the subtrees produced by pass 1 from
|
||||
# right to left with the rightmost one.
|
||||
prev = node.prev
|
||||
while prev is not None:
|
||||
prev_prev = prev.prev
|
||||
node = link(prev, node)
|
||||
prev = prev_prev
|
||||
# Now node can become the new root. It has no parent or siblings.
|
||||
node.prev = None
|
||||
node.next = None
|
||||
node.parent = None
|
||||
return node
|
||||
|
||||
def _cut(self, node):
|
||||
"""Cut a node from its parent."""
|
||||
prev = node.prev
|
||||
next = node.next
|
||||
if prev is not None:
|
||||
prev.next = next
|
||||
else:
|
||||
node.parent.left = next
|
||||
node.prev = None
|
||||
if next is not None:
|
||||
next.prev = prev
|
||||
node.next = None
|
||||
node.parent = None
|
||||
|
||||
|
||||
class BinaryHeap(MinHeap):
|
||||
"""A binary heap."""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize a binary heap."""
|
||||
super().__init__()
|
||||
self._heap = []
|
||||
self._count = count()
|
||||
|
||||
def min(self):
|
||||
dict = self._dict
|
||||
if not dict:
|
||||
raise nx.NetworkXError("heap is empty")
|
||||
heap = self._heap
|
||||
# Repeatedly remove stale key-value pairs until an up-to-date one is
# found.
|
||||
while True:
|
||||
value, _, key = heap[0]
|
||||
if key in dict and value == dict[key]:
|
||||
break
|
||||
heappop(heap)
|
||||
return (key, value)
|
||||
|
||||
def pop(self):
|
||||
dict = self._dict
|
||||
if not dict:
|
||||
raise nx.NetworkXError("heap is empty")
|
||||
heap = self._heap
|
||||
# Repeatedly remove stale key-value pairs until an up-to-date one is
# found.
|
||||
while True:
|
||||
value, _, key = heap[0]
|
||||
heappop(heap)
|
||||
if key in dict and value == dict[key]:
|
||||
break
|
||||
del dict[key]
|
||||
return (key, value)
|
||||
|
||||
def get(self, key, default=None):
|
||||
return self._dict.get(key, default)
|
||||
|
||||
def insert(self, key, value, allow_increase=False):
|
||||
dict = self._dict
|
||||
if key in dict:
|
||||
old_value = dict[key]
|
||||
if value < old_value or (allow_increase and value > old_value):
|
||||
# Since there is no way to efficiently obtain the location of a
|
||||
# key-value pair in the heap, insert a new pair even if ones
|
||||
# with the same key may already be present. Deem the old ones
|
||||
# as stale and skip them when the minimum pair is queried.
|
||||
dict[key] = value
|
||||
heappush(self._heap, (value, next(self._count), key))
|
||||
return value < old_value
|
||||
return False
|
||||
else:
|
||||
dict[key] = value
|
||||
heappush(self._heap, (value, next(self._count), key))
|
||||
return True
|
297	venv/lib/python3.13/site-packages/networkx/utils/mapped_queue.py	Normal file
@@ -0,0 +1,297 @@
"""Priority queue class with updatable priorities."""
|
||||
|
||||
import heapq
|
||||
|
||||
__all__ = ["MappedQueue"]
|
||||
|
||||
|
||||
class _HeapElement:
|
||||
"""This proxy class separates the heap element from its priority.
|
||||
|
||||
The idea is that using a 2-tuple (priority, element) works
|
||||
for sorting, but not for dict lookup because priorities are
|
||||
often floating point values so round-off can mess up equality.
|
||||
|
||||
So, we need inequalities to look at the priority (for sorting)
|
||||
and equality (and hash) to look at the element to enable
|
||||
updates to the priority.
|
||||
|
||||
Unfortunately, this class can be tricky to work with if you forget that
|
||||
`__lt__` compares the priority while `__eq__` compares the element.
|
||||
In `greedy_modularity_communities()` the following code is
|
||||
used to check that two _HeapElements differ in either element or priority:
|
||||
|
||||
if d_oldmax != row_max or d_oldmax.priority != row_max.priority:
|
||||
|
||||
If the priorities are the same, this implementation uses the element
|
||||
as a tiebreaker. This provides compatibility with older systems that
|
||||
use tuples to combine priority and elements.
|
||||
"""
|
||||
|
||||
__slots__ = ["priority", "element", "_hash"]
|
||||
|
||||
def __init__(self, priority, element):
|
||||
self.priority = priority
|
||||
self.element = element
|
||||
self._hash = hash(element)
|
||||
|
||||
def __lt__(self, other):
|
||||
try:
|
||||
other_priority = other.priority
|
||||
except AttributeError:
|
||||
return self.priority < other
|
||||
# assume comparing to another _HeapElement
|
||||
if self.priority == other_priority:
|
||||
try:
|
||||
return self.element < other.element
|
||||
except TypeError as err:
|
||||
raise TypeError(
"Consider using a tuple, with a priority value that can be compared."
) from err
|
||||
return self.priority < other_priority
|
||||
|
||||
def __gt__(self, other):
|
||||
try:
|
||||
other_priority = other.priority
|
||||
except AttributeError:
|
||||
return self.priority > other
|
||||
# assume comparing to another _HeapElement
|
||||
if self.priority == other_priority:
|
||||
try:
|
||||
return self.element > other.element
|
||||
except TypeError as err:
|
||||
raise TypeError(
"Consider using a tuple, with a priority value that can be compared."
) from err
|
||||
return self.priority > other_priority
|
||||
|
||||
def __eq__(self, other):
|
||||
try:
|
||||
return self.element == other.element
|
||||
except AttributeError:
|
||||
return self.element == other
|
||||
|
||||
def __hash__(self):
|
||||
return self._hash
|
||||
|
||||
def __getitem__(self, indx):
|
||||
return self.priority if indx == 0 else self.element[indx - 1]
|
||||
|
||||
def __iter__(self):
|
||||
yield self.priority
|
||||
try:
|
||||
yield from self.element
|
||||
except TypeError:
|
||||
yield self.element
|
||||
|
||||
def __repr__(self):
|
||||
return f"_HeapElement({self.priority}, {self.element})"
|
||||
|
||||
|
||||
class MappedQueue:
|
||||
"""The MappedQueue class implements a min-heap with removal and update-priority.
|
||||
|
||||
The min heap uses heapq as well as custom-written _siftup and _siftdown
methods to allow the heap positions to be tracked by an additional dict
keyed by element to position. The smallest element can be found in O(1)
time, new elements can be pushed in O(log n) time, and any element can be
popped, removed,
|
||||
or updated in O(log n) time. The queue cannot contain duplicate elements
|
||||
and an attempt to push an element already in the queue will have no effect.
|
||||
|
||||
MappedQueue complements the heapq package from the python standard
|
||||
library. While MappedQueue is designed for maximum compatibility with
|
||||
heapq, it adds element removal, lookup, and priority update.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
data : dict or iterable
|
||||
|
||||
Examples
|
||||
--------
|
||||
|
||||
A `MappedQueue` can be created empty, or optionally, given a dictionary
|
||||
of initial elements and priorities. The methods `push`, `pop`,
|
||||
`remove`, and `update` operate on the queue.
|
||||
|
||||
>>> colors_nm = {"red": 665, "blue": 470, "green": 550}
|
||||
>>> q = MappedQueue(colors_nm)
|
||||
>>> q.remove("red")
|
||||
>>> q.update("green", "violet", 400)
|
||||
>>> q.push("indigo", 425)
|
||||
True
|
||||
>>> [q.pop().element for i in range(len(q.heap))]
|
||||
['violet', 'indigo', 'blue']
|
||||
|
||||
A `MappedQueue` can also be initialized with a list or other iterable. The priority is assumed
|
||||
to be the sort order of the items in the list.
|
||||
|
||||
>>> q = MappedQueue([916, 50, 4609, 493, 237])
|
||||
>>> q.remove(493)
|
||||
>>> q.update(237, 1117)
|
||||
>>> [q.pop() for i in range(len(q.heap))]
|
||||
[50, 916, 1117, 4609]
|
||||
|
||||
An exception is raised if the elements are not comparable.
|
||||
|
||||
>>> q = MappedQueue([100, "a"])
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
TypeError: '<' not supported between instances of 'int' and 'str'
|
||||
|
||||
To avoid the exception, use a dictionary to assign priorities to the elements.
|
||||
|
||||
>>> q = MappedQueue({100: 0, "a": 1})
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Cormen, T. H., Leiserson, C. E., Rivest, R. L., & Stein, C. (2001).
|
||||
Introduction to Algorithms, second edition. MIT Press.
|
||||
.. [2] Knuth, D. E. (1997). The art of computer programming (Vol. 3).
|
||||
Pearson Education.
|
||||
"""
|
||||
|
||||
def __init__(self, data=None):
|
||||
"""Priority queue class with updatable priorities."""
|
||||
if data is None:
|
||||
self.heap = []
|
||||
elif isinstance(data, dict):
|
||||
self.heap = [_HeapElement(v, k) for k, v in data.items()]
|
||||
else:
|
||||
self.heap = list(data)
|
||||
self.position = {}
|
||||
self._heapify()
|
||||
|
||||
def _heapify(self):
|
||||
"""Restore heap invariant and recalculate map."""
|
||||
heapq.heapify(self.heap)
|
||||
self.position = {elt: pos for pos, elt in enumerate(self.heap)}
|
||||
if len(self.heap) != len(self.position):
|
||||
raise AssertionError("Heap contains duplicate elements")
|
||||
|
||||
def __len__(self):
|
||||
return len(self.heap)
|
||||
|
||||
def push(self, elt, priority=None):
|
||||
"""Add an element to the queue."""
|
||||
if priority is not None:
|
||||
elt = _HeapElement(priority, elt)
|
||||
# If element is already in queue, do nothing
|
||||
if elt in self.position:
|
||||
return False
|
||||
# Add element to heap and dict
|
||||
pos = len(self.heap)
|
||||
self.heap.append(elt)
|
||||
self.position[elt] = pos
|
||||
# Restore invariant by sifting down
|
||||
self._siftdown(0, pos)
|
||||
return True
|
||||
|
||||
def pop(self):
|
||||
"""Remove and return the smallest element in the queue."""
|
||||
# Remove smallest element
|
||||
elt = self.heap[0]
|
||||
del self.position[elt]
|
||||
# If elt is last item, remove and return
|
||||
if len(self.heap) == 1:
|
||||
self.heap.pop()
|
||||
return elt
|
||||
# Replace root with last element
|
||||
last = self.heap.pop()
|
||||
self.heap[0] = last
|
||||
self.position[last] = 0
|
||||
# Restore invariant by sifting up
|
||||
self._siftup(0)
|
||||
# Return smallest element
|
||||
return elt
|
||||
|
||||
def update(self, elt, new, priority=None):
|
||||
"""Replace an element in the queue with a new one."""
|
||||
if priority is not None:
|
||||
new = _HeapElement(priority, new)
|
||||
# Replace
|
||||
pos = self.position[elt]
|
||||
self.heap[pos] = new
|
||||
del self.position[elt]
|
||||
self.position[new] = pos
|
||||
# Restore invariant by sifting up
|
||||
self._siftup(pos)
|
||||
|
||||
def remove(self, elt):
|
||||
"""Remove an element from the queue."""
|
||||
# Find and remove element
|
||||
try:
|
||||
pos = self.position[elt]
|
||||
del self.position[elt]
|
||||
except KeyError:
|
||||
# Not in queue
|
||||
raise
|
||||
# If elt is last item, remove and return
|
||||
if pos == len(self.heap) - 1:
|
||||
self.heap.pop()
|
||||
return
|
||||
# Replace elt with last element
|
||||
last = self.heap.pop()
|
||||
self.heap[pos] = last
|
||||
self.position[last] = pos
|
||||
# Restore invariant by sifting up
|
||||
self._siftup(pos)
|
||||
|
||||
def _siftup(self, pos):
|
||||
"""Move smaller child up until hitting a leaf.
|
||||
|
||||
Built to mimic heapq._siftup, while also updating the position dict.
|
||||
"""
|
||||
heap, position = self.heap, self.position
|
||||
end_pos = len(heap)
|
||||
startpos = pos
|
||||
newitem = heap[pos]
|
||||
# Shift up the smaller child until hitting a leaf
|
||||
child_pos = (pos << 1) + 1 # start with leftmost child position
|
||||
while child_pos < end_pos:
|
||||
# Set child_pos to index of smaller child.
|
||||
child = heap[child_pos]
|
||||
right_pos = child_pos + 1
|
||||
if right_pos < end_pos:
|
||||
right = heap[right_pos]
|
||||
if not child < right:
|
||||
child = right
|
||||
child_pos = right_pos
|
||||
# Move the smaller child up.
|
||||
heap[pos] = child
|
||||
position[child] = pos
|
||||
pos = child_pos
|
||||
child_pos = (pos << 1) + 1
|
||||
# pos is a leaf position. Put newitem there, and bubble it up
|
||||
# to its final resting place (by sifting its parents down).
|
||||
while pos > 0:
|
||||
parent_pos = (pos - 1) >> 1
|
||||
parent = heap[parent_pos]
|
||||
if not newitem < parent:
|
||||
break
|
||||
heap[pos] = parent
|
||||
position[parent] = pos
|
||||
pos = parent_pos
|
||||
heap[pos] = newitem
|
||||
position[newitem] = pos
|
||||
|
||||
def _siftdown(self, start_pos, pos):
|
||||
"""Restore invariant. keep swapping with parent until smaller.
|
||||
|
||||
Built to mimic heapq._siftdown, while also updating the position dict.
|
||||
"""
|
||||
heap, position = self.heap, self.position
|
||||
newitem = heap[pos]
|
||||
# Follow the path to the root, moving parents down until finding a place
|
||||
# newitem fits.
|
||||
while pos > start_pos:
|
||||
parent_pos = (pos - 1) >> 1
|
||||
parent = heap[parent_pos]
|
||||
if not newitem < parent:
|
||||
break
|
||||
heap[pos] = parent
|
||||
position[parent] = pos
|
||||
pos = parent_pos
|
||||
heap[pos] = newitem
|
||||
position[newitem] = pos
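Aside: a small sketch of the _HeapElement contract described in its docstring (order by priority, equality and hash by element), which is what makes in-place priority updates possible:

    from networkx.utils.mapped_queue import MappedQueue, _HeapElement

    a = _HeapElement(1.0, "x")
    b = _HeapElement(2.0, "x")
    assert a == b                 # equality looks at the element only
    assert a < b                  # ordering looks at the priority

    q = MappedQueue({"x": 2.0, "y": 1.0})
    q.update("x", "x", priority=0.5)      # re-prioritize "x" in place
    assert q.pop().element == "x"         # now the smallest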
651	venv/lib/python3.13/site-packages/networkx/utils/misc.py	Normal file
@@ -0,0 +1,651 @@
"""
|
||||
Miscellaneous Helpers for NetworkX.
|
||||
|
||||
These are not imported into the base networkx namespace but
|
||||
can be accessed, for example, as
|
||||
|
||||
>>> import networkx as nx
|
||||
>>> nx.utils.make_list_of_ints({1, 2, 3})
|
||||
[1, 2, 3]
|
||||
>>> nx.utils.arbitrary_element({5, 1, 7}) # doctest: +SKIP
|
||||
1
|
||||
"""
|
||||
|
||||
import random
|
||||
import warnings
|
||||
from collections import defaultdict
|
||||
from collections.abc import Iterable, Iterator, Sized
|
||||
from itertools import chain, tee
|
||||
|
||||
import networkx as nx
|
||||
|
||||
__all__ = [
|
||||
"flatten",
|
||||
"make_list_of_ints",
|
||||
"dict_to_numpy_array",
|
||||
"arbitrary_element",
|
||||
"pairwise",
|
||||
"groups",
|
||||
"create_random_state",
|
||||
"create_py_random_state",
|
||||
"PythonRandomInterface",
|
||||
"PythonRandomViaNumpyBits",
|
||||
"nodes_equal",
|
||||
"edges_equal",
|
||||
"graphs_equal",
|
||||
"_clear_cache",
|
||||
]
|
||||
|
||||
|
||||
# some cookbook stuff
|
||||
# used in deciding whether something is a bunch of nodes, edges, etc.
|
||||
# see G.add_nodes and others in Graph Class in networkx/base.py
|
||||
|
||||
|
||||
def flatten(obj, result=None):
|
||||
"""Return flattened version of (possibly nested) iterable object."""
|
||||
if not isinstance(obj, Iterable | Sized) or isinstance(obj, str):
|
||||
return obj
|
||||
if result is None:
|
||||
result = []
|
||||
for item in obj:
|
||||
if not isinstance(item, Iterable | Sized) or isinstance(item, str):
|
||||
result.append(item)
|
||||
else:
|
||||
flatten(item, result)
|
||||
return tuple(result)
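For example (note that flatten returns a tuple even when given lists):

    assert flatten((1, (2, (3,)), [4, 5])) == (1, 2, 3, 4, 5)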
|
||||
|
||||
|
||||
def make_list_of_ints(sequence):
|
||||
"""Return list of ints from sequence of integral numbers.
|
||||
|
||||
All elements of the sequence must satisfy int(element) == element
|
||||
or a ValueError is raised. Sequence is iterated through once.
|
||||
|
||||
If sequence is a list, the non-int values are replaced with ints
in place, so no new list is created.
|
||||
"""
|
||||
if not isinstance(sequence, list):
|
||||
result = []
|
||||
for i in sequence:
|
||||
errmsg = f"sequence is not all integers: {i}"
|
||||
try:
|
||||
ii = int(i)
|
||||
except ValueError:
|
||||
raise nx.NetworkXError(errmsg) from None
|
||||
if ii != i:
|
||||
raise nx.NetworkXError(errmsg)
|
||||
result.append(ii)
|
||||
return result
|
||||
# original sequence is a list... in-place conversion to ints
|
||||
for indx, i in enumerate(sequence):
|
||||
errmsg = f"sequence is not all integers: {i}"
|
||||
if isinstance(i, int):
|
||||
continue
|
||||
try:
|
||||
ii = int(i)
|
||||
except ValueError:
|
||||
raise nx.NetworkXError(errmsg) from None
|
||||
if ii != i:
|
||||
raise nx.NetworkXError(errmsg)
|
||||
sequence[indx] = ii
|
||||
return sequence
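For example, a list input is converted in place rather than copied:

    lst = [1, 2.0, 3]
    out = make_list_of_ints(lst)
    assert out is lst and lst == [1, 2, 3]   # same list object, now all ints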
|
||||
|
||||
|
||||
def dict_to_numpy_array(d, mapping=None):
|
||||
"""Convert a dictionary of dictionaries to a numpy array
|
||||
with optional mapping."""
|
||||
try:
|
||||
return _dict_to_numpy_array2(d, mapping)
|
||||
except (AttributeError, TypeError):
|
||||
# AttributeError is when no mapping was provided and v.keys() fails.
|
||||
# TypeError is when a mapping was provided and d[k1][k2] fails.
|
||||
return _dict_to_numpy_array1(d, mapping)
|
||||
|
||||
|
||||
def _dict_to_numpy_array2(d, mapping=None):
|
||||
"""Convert a dictionary of dictionaries to a 2d numpy array
|
||||
with optional mapping.
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
if mapping is None:
|
||||
s = set(d.keys())
|
||||
for k, v in d.items():
|
||||
s.update(v.keys())
|
||||
mapping = dict(zip(s, range(len(s))))
|
||||
n = len(mapping)
|
||||
a = np.zeros((n, n))
|
||||
for k1, i in mapping.items():
|
||||
for k2, j in mapping.items():
|
||||
try:
|
||||
a[i, j] = d[k1][k2]
|
||||
except KeyError:
|
||||
pass
|
||||
return a
|
||||
|
||||
|
||||
def _dict_to_numpy_array1(d, mapping=None):
|
||||
"""Convert a dictionary of numbers to a 1d numpy array with optional mapping."""
|
||||
import numpy as np
|
||||
|
||||
if mapping is None:
|
||||
s = set(d.keys())
|
||||
mapping = dict(zip(s, range(len(s))))
|
||||
n = len(mapping)
|
||||
a = np.zeros(n)
|
||||
for k1, i in mapping.items():
a[i] = d[k1]
|
||||
return a
|
||||
|
||||
|
||||
def arbitrary_element(iterable):
|
||||
"""Returns an arbitrary element of `iterable` without removing it.
|
||||
|
||||
This is most useful for "peeking" at an arbitrary element of a set,
|
||||
but can be used for any list, dictionary, etc., as well.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
iterable : `abc.collections.Iterable` instance
|
||||
Any object that implements ``__iter__``, e.g. set, dict, list, tuple,
|
||||
etc.
|
||||
|
||||
Returns
|
||||
-------
|
||||
The object that results from ``next(iter(iterable))``
|
||||
|
||||
Raises
|
||||
------
|
||||
ValueError
|
||||
If `iterable` is an iterator (because the current implementation of
|
||||
this function would consume an element from the iterator).
|
||||
|
||||
Examples
|
||||
--------
|
||||
Arbitrary elements from common Iterable objects:
|
||||
|
||||
>>> nx.utils.arbitrary_element([1, 2, 3]) # list
|
||||
1
|
||||
>>> nx.utils.arbitrary_element((1, 2, 3)) # tuple
|
||||
1
|
||||
>>> nx.utils.arbitrary_element({1, 2, 3}) # set
|
||||
1
|
||||
>>> d = {k: v for k, v in zip([1, 2, 3], [3, 2, 1])}
|
||||
>>> nx.utils.arbitrary_element(d) # dict_keys
|
||||
1
|
||||
>>> nx.utils.arbitrary_element(d.values()) # dict values
|
||||
3
|
||||
|
||||
`str` is also an Iterable:
|
||||
|
||||
>>> nx.utils.arbitrary_element("hello")
|
||||
'h'
|
||||
|
||||
:exc:`ValueError` is raised if `iterable` is an iterator:
|
||||
|
||||
>>> iterator = iter([1, 2, 3]) # Iterator, *not* Iterable
|
||||
>>> nx.utils.arbitrary_element(iterator)
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
ValueError: cannot return an arbitrary item from an iterator
|
||||
|
||||
Notes
|
||||
-----
|
||||
This function does not return a *random* element. If `iterable` is
|
||||
ordered, sequential calls will return the same value::
|
||||
|
||||
>>> l = [1, 2, 3]
|
||||
>>> nx.utils.arbitrary_element(l)
|
||||
1
|
||||
>>> nx.utils.arbitrary_element(l)
|
||||
1
|
||||
|
||||
"""
|
||||
if isinstance(iterable, Iterator):
|
||||
raise ValueError("cannot return an arbitrary item from an iterator")
|
||||
# Another possible implementation is ``for x in iterable: return x``.
|
||||
return next(iter(iterable))
|
||||
|
||||
|
||||
# Recipe from the itertools documentation.
|
||||
def pairwise(iterable, cyclic=False):
|
||||
"s -> (s0, s1), (s1, s2), (s2, s3), ..."
|
||||
a, b = tee(iterable)
|
||||
first = next(b, None)
|
||||
if cyclic is True:
|
||||
return zip(a, chain(b, (first,)))
|
||||
return zip(a, b)
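For example, including the cyclic variant that closes the loop back to the first element:

    assert list(pairwise([1, 2, 3])) == [(1, 2), (2, 3)]
    assert list(pairwise([1, 2, 3], cyclic=True)) == [(1, 2), (2, 3), (3, 1)]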
|
||||
|
||||
|
||||
def groups(many_to_one):
|
||||
"""Converts a many-to-one mapping into a one-to-many mapping.
|
||||
|
||||
`many_to_one` must be a dictionary whose keys and values are all
|
||||
:term:`hashable`.
|
||||
|
||||
The return value is a dictionary mapping values from `many_to_one`
|
||||
to sets of keys from `many_to_one` that have that value.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from networkx.utils import groups
|
||||
>>> many_to_one = {"a": 1, "b": 1, "c": 2, "d": 3, "e": 3}
|
||||
>>> groups(many_to_one) # doctest: +SKIP
|
||||
{1: {'a', 'b'}, 2: {'c'}, 3: {'e', 'd'}}
|
||||
"""
|
||||
one_to_many = defaultdict(set)
|
||||
for v, k in many_to_one.items():
|
||||
one_to_many[k].add(v)
|
||||
return dict(one_to_many)
|
||||
|
||||
|
||||
def create_random_state(random_state=None):
|
||||
"""Returns a numpy.random.RandomState or numpy.random.Generator instance
|
||||
depending on input.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
random_state : int or NumPy RandomState or Generator instance, optional (default=None)
|
||||
If int, return a numpy.random.RandomState instance set with seed=int.
|
||||
if `numpy.random.RandomState` instance, return it.
|
||||
if `numpy.random.Generator` instance, return it.
|
||||
if None or numpy.random, return the global random number generator used
|
||||
by numpy.random.
|
||||
"""
|
||||
import numpy as np
|
||||
|
||||
if random_state is None or random_state is np.random:
|
||||
return np.random.mtrand._rand
|
||||
if isinstance(random_state, np.random.RandomState):
|
||||
return random_state
|
||||
if isinstance(random_state, int):
|
||||
return np.random.RandomState(random_state)
|
||||
if isinstance(random_state, np.random.Generator):
|
||||
return random_state
|
||||
msg = (
|
||||
f"{random_state} cannot be used to create a numpy.random.RandomState or\n"
|
||||
"numpy.random.Generator instance"
|
||||
)
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
class PythonRandomViaNumpyBits(random.Random):
|
||||
"""Provide the random.random algorithms using a numpy.random bit generator
|
||||
|
||||
The intent is to allow people to contribute code that uses Python's random
|
||||
library, but still allow users to provide a single easily controlled random
|
||||
bit-stream for all work with NetworkX. This implementation is based on helpful
|
||||
comments and code from Robert Kern on NumPy's GitHub Issue #24458.
|
||||
|
||||
This implementation supersedes that of `PythonRandomInterface` which rewrote
|
||||
methods to account for subtle differences in API between `random` and
|
||||
`numpy.random`. Instead this subclasses `random.Random` and overwrites
|
||||
the methods `random`, `getrandbits`, `getstate`, `setstate` and `seed`.
|
||||
It makes them use the rng values from an input numpy `RandomState` or `Generator`.
|
||||
Those few methods allow the rest of the `random.Random` methods to provide
|
||||
the API interface of `random.random` while using randomness generated by
|
||||
a numpy generator.
|
||||
"""
|
||||
|
||||
def __init__(self, rng=None):
|
||||
try:
|
||||
import numpy as np
|
||||
except ImportError:
|
||||
msg = "numpy not found, only random.random available."
|
||||
warnings.warn(msg, ImportWarning)
|
||||
|
||||
if rng is None:
|
||||
self._rng = np.random.mtrand._rand
|
||||
else:
|
||||
self._rng = rng
|
||||
|
||||
# Not strictly necessary (gauss() is not overridden here), but gauss_next
# is in the superclass and nominally public, so initialize it here.
|
||||
self.gauss_next = None
|
||||
|
||||
def random(self):
|
||||
"""Get the next random number in the range 0.0 <= X < 1.0."""
|
||||
return self._rng.random()
|
||||
|
||||
def getrandbits(self, k):
|
||||
"""getrandbits(k) -> x. Generates an int with k random bits."""
|
||||
if k < 0:
|
||||
raise ValueError("number of bits must be non-negative")
|
||||
numbytes = (k + 7) // 8 # bits / 8 and rounded up
|
||||
x = int.from_bytes(self._rng.bytes(numbytes), "big")
|
||||
return x >> (numbytes * 8 - k) # trim excess bits
|
||||
|
||||
def getstate(self):
|
||||
return self._rng.__getstate__()
|
||||
|
||||
def setstate(self, state):
|
||||
self._rng.__setstate__(state)
|
||||
|
||||
def seed(self, *args, **kwds):
|
||||
"Do nothing override method."
|
||||
raise NotImplementedError("seed() not implemented in PythonRandomViaNumpyBits")
|
||||
|
||||
|
||||
##################################################################
|
||||
class PythonRandomInterface:
|
||||
"""PythonRandomInterface is included for backward compatibility
|
||||
New code should use PythonRandomViaNumpyBits instead.
|
||||
"""
|
||||
|
||||
def __init__(self, rng=None):
|
||||
try:
|
||||
import numpy as np
|
||||
except ImportError:
|
||||
msg = "numpy not found, only random.random available."
|
||||
warnings.warn(msg, ImportWarning)
|
||||
|
||||
if rng is None:
|
||||
self._rng = np.random.mtrand._rand
|
||||
else:
|
||||
self._rng = rng
|
||||
|
||||
def random(self):
|
||||
return self._rng.random()
|
||||
|
||||
def uniform(self, a, b):
|
||||
return a + (b - a) * self._rng.random()
|
||||
|
||||
def randrange(self, a, b=None):
|
||||
import numpy as np
|
||||
|
||||
if b is None:
|
||||
a, b = 0, a
|
||||
if b > 9223372036854775807: # from np.iinfo(np.int64).max
|
||||
tmp_rng = PythonRandomViaNumpyBits(self._rng)
|
||||
return tmp_rng.randrange(a, b)
|
||||
|
||||
if isinstance(self._rng, np.random.Generator):
|
||||
return self._rng.integers(a, b)
|
||||
return self._rng.randint(a, b)
|
||||
|
||||
# NOTE: the numpy implementations of `choice` don't support strings, so
|
||||
# this cannot be replaced with self._rng.choice
|
||||
def choice(self, seq):
|
||||
import numpy as np
|
||||
|
||||
if isinstance(self._rng, np.random.Generator):
|
||||
idx = self._rng.integers(0, len(seq))
|
||||
else:
|
||||
idx = self._rng.randint(0, len(seq))
|
||||
return seq[idx]
|
||||
|
||||
def gauss(self, mu, sigma):
|
||||
return self._rng.normal(mu, sigma)
|
||||
|
||||
def shuffle(self, seq):
|
||||
return self._rng.shuffle(seq)
|
||||
|
||||
# Some methods don't match API for numpy RandomState.
|
||||
# Commented out versions are not used by NetworkX
|
||||
|
||||
def sample(self, seq, k):
|
||||
return self._rng.choice(list(seq), size=(k,), replace=False)
|
||||
|
||||
def randint(self, a, b):
|
||||
import numpy as np
|
||||
|
||||
if b > 9223372036854775807: # from np.iinfo(np.int64).max
|
||||
tmp_rng = PythonRandomViaNumpyBits(self._rng)
|
||||
return tmp_rng.randint(a, b)
|
||||
|
||||
if isinstance(self._rng, np.random.Generator):
|
||||
return self._rng.integers(a, b + 1)
|
||||
return self._rng.randint(a, b + 1)
|
||||
|
||||
# exponential as expovariate with 1/argument,
|
||||
def expovariate(self, scale):
|
||||
return self._rng.exponential(1 / scale)
|
||||
|
||||
# pareto as paretovariate with 1/argument,
|
||||
def paretovariate(self, shape):
|
||||
return self._rng.pareto(shape)
|
||||
|
||||
|
||||
# weibull as weibullvariate multiplied by beta,
|
||||
# def weibullvariate(self, alpha, beta):
|
||||
# return self._rng.weibull(alpha) * beta
|
||||
#
|
||||
# def triangular(self, low, high, mode):
|
||||
# return self._rng.triangular(low, mode, high)
|
||||
#
|
||||
# def choices(self, seq, weights=None, cum_weights=None, k=1):
|
||||
# return self._rng.choice(seq
|
||||
|
||||
|
||||
def create_py_random_state(random_state=None):
|
||||
"""Returns a random.Random instance depending on input.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
random_state : int or random number generator or None (default=None)
|
||||
- If int, return a `random.Random` instance set with seed=int.
|
||||
- If `random.Random` instance, return it.
|
||||
- If None or the `random` package, return the global random number
generator used by the `random` package.
|
||||
- If an `np.random.Generator` instance, or the `np.random` package, or
|
||||
the global numpy random number generator, then return it
wrapped in a `PythonRandomViaNumpyBits` class.
|
||||
- If a `PythonRandomViaNumpyBits` instance, return it.
|
||||
- If a `PythonRandomInterface` instance, return it.
|
||||
- If a `np.random.RandomState` instance and not the global numpy default,
|
||||
return it wrapped in `PythonRandomInterface` for backward bit-stream
|
||||
matching with legacy code.
|
||||
|
||||
Notes
|
||||
-----
|
||||
- A diagram intending to illustrate the relationships behind our support
|
||||
for numpy random numbers is called
|
||||
`NetworkX Numpy Random Numbers <https://excalidraw.com/#room=b5303f2b03d3af7ccc6a,e5ZDIWdWWCTTsg8OqoRvPA>`_.
|
||||
- More discussion about this support also appears in
|
||||
`gh-6869#comment <https://github.com/networkx/networkx/pull/6869#issuecomment-1944799534>`_.
|
||||
- Wrappers of numpy.random number generators allow them to mimic the Python random
|
||||
number generation algorithms. For example, Python can create arbitrarily large
|
||||
random ints, and the wrappers use Numpy bit-streams with CPython's random module
|
||||
to choose arbitrarily large random integers too.
|
||||
- We provide two wrapper classes:
|
||||
`PythonRandomViaNumpyBits` is usually what you want and is always used for
|
||||
`np.Generator` instances. But for users who need to recreate random numbers
|
||||
produced in NetworkX 3.2 or earlier, we maintain the `PythonRandomInterface`
|
||||
wrapper as well. We use it only if passed a (non-default) `np.RandomState`
|
||||
instance pre-initialized from a seed. Otherwise the newer wrapper is used.
|
||||
"""
|
||||
if random_state is None or random_state is random:
|
||||
return random._inst
|
||||
if isinstance(random_state, random.Random):
|
||||
return random_state
|
||||
if isinstance(random_state, int):
|
||||
return random.Random(random_state)
|
||||
|
||||
try:
|
||||
import numpy as np
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
if isinstance(random_state, PythonRandomInterface | PythonRandomViaNumpyBits):
|
||||
return random_state
|
||||
if isinstance(random_state, np.random.Generator):
|
||||
return PythonRandomViaNumpyBits(random_state)
|
||||
if random_state is np.random:
|
||||
return PythonRandomViaNumpyBits(np.random.mtrand._rand)
|
||||
|
||||
if isinstance(random_state, np.random.RandomState):
|
||||
if random_state is np.random.mtrand._rand:
|
||||
return PythonRandomViaNumpyBits(random_state)
|
||||
# Only need older interface if specially constructed RandomState used
|
||||
return PythonRandomInterface(random_state)
|
||||
|
||||
msg = f"{random_state} cannot be used to generate a random.Random instance"
|
||||
raise ValueError(msg)
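Aside: a sketch of how the dispatch above normalizes different seed inputs (assuming numpy is installed):

    import random
    import numpy as np

    assert create_py_random_state(None) is random._inst          # global Random
    assert isinstance(create_py_random_state(42), random.Random)
    gen = np.random.default_rng(0)
    assert isinstance(create_py_random_state(gen), PythonRandomViaNumpyBits)
    rs = np.random.RandomState(0)                                # not the global RandomState
    assert isinstance(create_py_random_state(rs), PythonRandomInterface)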
|
||||
|
||||
|
||||
def nodes_equal(nodes1, nodes2):
|
||||
"""Check if nodes are equal.
|
||||
|
||||
Equality here means equal as Python objects.
|
||||
Node data must match if included.
|
||||
The order of nodes is not relevant.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
nodes1, nodes2 : iterables of nodes, or (node, datadict) tuples
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if nodes are equal, False otherwise.
|
||||
"""
|
||||
nlist1 = list(nodes1)
|
||||
nlist2 = list(nodes2)
|
||||
try:
|
||||
d1 = dict(nlist1)
|
||||
d2 = dict(nlist2)
|
||||
except (ValueError, TypeError):
|
||||
d1 = dict.fromkeys(nlist1)
|
||||
d2 = dict.fromkeys(nlist2)
|
||||
return d1 == d2
|
||||
|
||||
|
||||
def edges_equal(edges1, edges2):
|
||||
"""Check if edges are equal.
|
||||
|
||||
Equality here means equal as Python objects.
|
||||
Edge data must match if included.
|
||||
The order of the edges is not relevant.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
edges1, edges2 : iterables of edge tuples, each given as
(u, v), or
(u, v, d) with a data dict, or
(u, v, k, d) with a key and a data dict
|
||||
|
||||
Returns
|
||||
-------
|
||||
bool
|
||||
True if edges are equal, False otherwise.
|
||||
"""
|
||||
from collections import defaultdict
|
||||
|
||||
d1 = defaultdict(dict)
|
||||
d2 = defaultdict(dict)
|
||||
c1 = 0
|
||||
for c1, e in enumerate(edges1):
|
||||
u, v = e[0], e[1]
|
||||
data = [e[2:]]
|
||||
if v in d1[u]:
|
||||
data = d1[u][v] + data
|
||||
d1[u][v] = data
|
||||
d1[v][u] = data
|
||||
c2 = 0
|
||||
for c2, e in enumerate(edges2):
|
||||
u, v = e[0], e[1]
|
||||
data = [e[2:]]
|
||||
if v in d2[u]:
|
||||
data = d2[u][v] + data
|
||||
d2[u][v] = data
|
||||
d2[v][u] = data
|
||||
if c1 != c2:
|
||||
return False
|
||||
# can check one direction because lengths are the same.
|
||||
for n, nbrdict in d1.items():
|
||||
for nbr, datalist in nbrdict.items():
|
||||
if n not in d2:
|
||||
return False
|
||||
if nbr not in d2[n]:
|
||||
return False
|
||||
d2datalist = d2[n][nbr]
|
||||
for data in datalist:
|
||||
if datalist.count(data) != d2datalist.count(data):
|
||||
return False
|
||||
return True
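Aside: a few cases illustrating the multiset semantics implemented above (order-free and direction-free, but data and multiplicity must match):

    assert edges_equal([(0, 1), (1, 2)], [(2, 1), (1, 0)])             # order/direction free
    assert not edges_equal([(0, 1, {"w": 1})], [(0, 1, {"w": 2})])     # data must match
    assert not edges_equal([(0, 1), (0, 1)], [(0, 1)])                 # multiplicity counts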
|
||||
|
||||
|
||||
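# A quick sketch of `edges_equal` on plain and data-carrying edge tuples
# (illustrative values; order never matters, data always does):
#
#     >>> edges_equal([(0, 1), (1, 2)], [(1, 2), (1, 0)])
#     True
#     >>> edges_equal([(0, 1, {"weight": 2})], [(0, 1, {"weight": 3})])
#     False

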
def graphs_equal(graph1, graph2):
    """Check if graphs are equal.

    Equality here means equal as Python objects (not isomorphism).
    Node, edge and graph data must match.

    Parameters
    ----------
    graph1, graph2 : graph

    Returns
    -------
    bool
        True if graphs are equal, False otherwise.
    """
    return (
        graph1.adj == graph2.adj
        and graph1.nodes == graph2.nodes
        and graph1.graph == graph2.graph
    )


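# Sketch: two independently built graphs compare equal only if adjacency,
# node data, and graph attributes all match (illustrative, not a doctest):
#
#     >>> G, H = nx.path_graph(3), nx.path_graph(3)
#     >>> graphs_equal(G, H)
#     True
#     >>> H.graph["name"] = "other"
#     >>> graphs_equal(G, H)
#     False

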
def _clear_cache(G):
    """Clear the cache of a graph (currently stores converted graphs).

    Caching is controlled via ``nx.config.cache_converted_graphs`` configuration.
    """
    if cache := getattr(G, "__networkx_cache__", None):
        cache.clear()


def check_create_using(create_using, *, directed=None, multigraph=None, default=None):
    """Assert that create_using has good properties

    This checks for desired directedness and multi-edge properties.
    It returns `create_using` unless that is `None` when it returns
    the optionally specified default value.

    Parameters
    ----------
    create_using : None, graph class or instance
        The input value of create_using for a function.
    directed : None or bool
        Whether to check `create_using.is_directed() == directed`.
        If None, do not assert directedness.
    multigraph : None or bool
        Whether to check `create_using.is_multigraph() == multigraph`.
        If None, do not assert multi-edge property.
    default : None or graph class
        The graph class to return if create_using is None.

    Returns
    -------
    create_using : graph class or instance
        The provided graph class or instance, or if None, the `default` value.

    Raises
    ------
    NetworkXError
        When `create_using` doesn't match the properties specified by `directed`
        or `multigraph` parameters.
    """
    if default is None:
        default = nx.Graph
    G = create_using if create_using is not None else default

    G_directed = G.is_directed(None) if isinstance(G, type) else G.is_directed()
    G_multigraph = G.is_multigraph(None) if isinstance(G, type) else G.is_multigraph()

    if directed is not None:
        if directed and not G_directed:
            raise nx.NetworkXError("create_using must be directed")
        if not directed and G_directed:
            raise nx.NetworkXError("create_using must not be directed")

    if multigraph is not None:
        if multigraph and not G_multigraph:
            raise nx.NetworkXError("create_using must be a multi-graph")
        if not multigraph and G_multigraph:
            raise nx.NetworkXError("create_using must not be a multi-graph")
    return G

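# A small usage sketch of the helper above (assuming it is imported from
# `networkx.utils.misc`; exception text abbreviated):
#
#     >>> check_create_using(None, directed=True, default=nx.DiGraph)
#     <class 'networkx.classes.digraph.DiGraph'>
#     >>> check_create_using(nx.Graph(), directed=True)
#     Traceback (most recent call last):
#     ...
#     NetworkXError: create_using must be directed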
@ -0,0 +1,164 @@
"""
|
||||
Utilities for generating random numbers, random sequences, and
|
||||
random selections.
|
||||
"""
|
||||
|
||||
import networkx as nx
|
||||
from networkx.utils import py_random_state
|
||||
|
||||
__all__ = [
|
||||
"powerlaw_sequence",
|
||||
"zipf_rv",
|
||||
"cumulative_distribution",
|
||||
"discrete_sequence",
|
||||
"random_weighted_sample",
|
||||
"weighted_choice",
|
||||
]
|
||||
|
||||
|
||||
# The same helpers for choosing random sequences from distributions
|
||||
# uses Python's random module
|
||||
# https://docs.python.org/3/library/random.html
|
||||
|
||||
|
||||
@py_random_state(2)
|
||||
def powerlaw_sequence(n, exponent=2.0, seed=None):
|
||||
"""
|
||||
Return sample sequence of length n from a power law distribution.
|
||||
"""
|
||||
return [seed.paretovariate(exponent - 1) for i in range(n)]
|
||||
|
||||
|
||||
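# Note: `paretovariate(exponent - 1)` draws a Pareto variate whose density is
# proportional to x**(-exponent), which is what gives the sequence the stated
# power-law exponent. A sketch (values are floats >= 1.0):
#
#     >>> zs = powerlaw_sequence(5, exponent=2.5, seed=42)

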
@py_random_state(2)
def zipf_rv(alpha, xmin=1, seed=None):
    r"""Returns a random value chosen from the Zipf distribution.

    The return value is an integer drawn from the probability distribution

    .. math::

        p(x)=\frac{x^{-\alpha}}{\zeta(\alpha, x_{\min})},

    where $\zeta(\alpha, x_{\min})$ is the Hurwitz zeta function.

    Parameters
    ----------
    alpha : float
        Exponent value of the distribution
    xmin : int
        Minimum value
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness<randomness>`.

    Returns
    -------
    x : int
        Random value from Zipf distribution

    Raises
    ------
    ValueError:
        If xmin < 1 or
        if alpha <= 1

    Notes
    -----
    The rejection algorithm generates random values for the power-law
    distribution in uniformly bounded expected time dependent on
    parameters. See [1]_ for details on its operation.

    Examples
    --------
    >>> nx.utils.zipf_rv(alpha=2, xmin=3, seed=42)
    8

    References
    ----------
    .. [1] Luc Devroye, Non-Uniform Random Variate Generation,
       Springer-Verlag, New York, 1986.
    """
    if xmin < 1:
        raise ValueError("xmin < 1")
    if alpha <= 1:
        raise ValueError("alpha <= 1.0")
    a1 = alpha - 1.0
    b = 2**a1
    while True:
        u = 1.0 - seed.random()  # u in (0,1]
        v = seed.random()  # v in [0,1)
        x = int(xmin * u ** -(1.0 / a1))
        t = (1.0 + (1.0 / x)) ** a1
        if v * x * (t - 1.0) / (b - 1.0) <= t / b:
            break
    return x


def cumulative_distribution(distribution):
    """Returns normalized cumulative distribution from discrete distribution."""

    cdf = [0.0]
    psum = sum(distribution)
    for i in range(len(distribution)):
        cdf.append(cdf[i] + distribution[i] / psum)
    return cdf


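# For example (a sketch; the trailing value is always exactly 1.0):
#
#     >>> cumulative_distribution([1, 2, 3])
#     [0.0, 0.16666666666666666, 0.5, 1.0]

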
@py_random_state(3)
def discrete_sequence(n, distribution=None, cdistribution=None, seed=None):
    """
    Return sample sequence of length n from a given discrete distribution
    or discrete cumulative distribution.

    One of the following must be specified.

    distribution = histogram of values, will be normalized

    cdistribution = normalized discrete cumulative distribution

    """
    import bisect

    if cdistribution is not None:
        cdf = cdistribution
    elif distribution is not None:
        cdf = cumulative_distribution(distribution)
    else:
        raise nx.NetworkXError(
            "discrete_sequence: distribution or cdistribution missing"
        )

    # get a uniform random number
    inputseq = [seed.random() for i in range(n)]

    # choose from CDF
    seq = [bisect.bisect_left(cdf, s) - 1 for s in inputseq]
    return seq


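# Sketch: with histogram [0, 1, 1], only values 1 and 2 have mass, so every
# drawn value is in {1, 2} (illustrative seed):
#
#     >>> discrete_sequence(3, distribution=[0, 1, 1], seed=42)

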
@py_random_state(2)
def random_weighted_sample(mapping, k, seed=None):
    """Returns k items without replacement from a weighted sample.

    The input is a dictionary of items with weights as values.
    """
    if k > len(mapping):
        raise ValueError("sample larger than population")
    sample = set()
    while len(sample) < k:
        sample.add(weighted_choice(mapping, seed))
    return list(sample)


@py_random_state(1)
def weighted_choice(mapping, seed=None):
    """Returns a single element from a weighted sample.

    The input is a dictionary of items with weights as values.
    """
    # use roulette method
    rnd = seed.random() * sum(mapping.values())
    for k, w in mapping.items():
        rnd -= w
        if rnd < 0:
            return k
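

# Sketch of the two weighted helpers together (illustrative weights; with
# these weights "b" is drawn ~90% of the time):
#
#     >>> weights = {"a": 1, "b": 9}
#     >>> weighted_choice(weights, seed=42)
#     'b'
#     >>> sorted(random_weighted_sample(weights, 2, seed=42))
#     ['a', 'b']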
159
venv/lib/python3.13/site-packages/networkx/utils/rcm.py
Normal file
@ -0,0 +1,159 @@
"""
|
||||
Cuthill-McKee ordering of graph nodes to produce sparse matrices
|
||||
"""
|
||||
|
||||
from collections import deque
|
||||
from operator import itemgetter
|
||||
|
||||
import networkx as nx
|
||||
|
||||
from ..utils import arbitrary_element
|
||||
|
||||
__all__ = ["cuthill_mckee_ordering", "reverse_cuthill_mckee_ordering"]
|
||||
|
||||
|
||||
def cuthill_mckee_ordering(G, heuristic=None):
|
||||
"""Generate an ordering (permutation) of the graph nodes to make
|
||||
a sparse matrix.
|
||||
|
||||
Uses the Cuthill-McKee heuristic (based on breadth-first search) [1]_.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
G : graph
|
||||
A NetworkX graph
|
||||
|
||||
heuristic : function, optional
|
||||
Function to choose starting node for RCM algorithm. If None
|
||||
a node from a pseudo-peripheral pair is used. A user-defined function
|
||||
can be supplied that takes a graph object and returns a single node.
|
||||
|
||||
Returns
|
||||
-------
|
||||
nodes : generator
|
||||
Generator of nodes in Cuthill-McKee ordering.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from networkx.utils import cuthill_mckee_ordering
|
||||
>>> G = nx.path_graph(4)
|
||||
>>> rcm = list(cuthill_mckee_ordering(G))
|
||||
>>> A = nx.adjacency_matrix(G, nodelist=rcm)
|
||||
|
||||
Smallest degree node as heuristic function:
|
||||
|
||||
>>> def smallest_degree(G):
|
||||
... return min(G, key=G.degree)
|
||||
>>> rcm = list(cuthill_mckee_ordering(G, heuristic=smallest_degree))
|
||||
|
||||
|
||||
See Also
|
||||
--------
|
||||
reverse_cuthill_mckee_ordering
|
||||
|
||||
Notes
|
||||
-----
|
||||
The optimal solution the bandwidth reduction is NP-complete [2]_.
|
||||
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] E. Cuthill and J. McKee.
|
||||
Reducing the bandwidth of sparse symmetric matrices,
|
||||
In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
|
||||
http://doi.acm.org/10.1145/800195.805928
|
||||
.. [2] Steven S. Skiena. 1997. The Algorithm Design Manual.
|
||||
Springer-Verlag New York, Inc., New York, NY, USA.
|
||||
"""
|
||||
for c in nx.connected_components(G):
|
||||
yield from connected_cuthill_mckee_ordering(G.subgraph(c), heuristic)
|
||||
|
||||
|
||||
def reverse_cuthill_mckee_ordering(G, heuristic=None):
    """Generate an ordering (permutation) of the graph nodes to make
    a sparse matrix.

    Uses the reverse Cuthill-McKee heuristic (based on breadth-first search)
    [1]_.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    heuristic : function, optional
        Function to choose starting node for RCM algorithm. If None
        a node from a pseudo-peripheral pair is used. A user-defined function
        can be supplied that takes a graph object and returns a single node.

    Returns
    -------
    nodes : generator
        Generator of nodes in reverse Cuthill-McKee ordering.

    Examples
    --------
    >>> from networkx.utils import reverse_cuthill_mckee_ordering
    >>> G = nx.path_graph(4)
    >>> rcm = list(reverse_cuthill_mckee_ordering(G))
    >>> A = nx.adjacency_matrix(G, nodelist=rcm)

    Smallest degree node as heuristic function:

    >>> def smallest_degree(G):
    ...     return min(G, key=G.degree)
    >>> rcm = list(reverse_cuthill_mckee_ordering(G, heuristic=smallest_degree))

    See Also
    --------
    cuthill_mckee_ordering

    Notes
    -----
    Finding the optimal solution to the bandwidth reduction problem is
    NP-complete [2]_.

    References
    ----------
    .. [1] E. Cuthill and J. McKee.
       Reducing the bandwidth of sparse symmetric matrices,
       In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
       http://doi.acm.org/10.1145/800195.805928
    .. [2] Steven S. Skiena. 1997. The Algorithm Design Manual.
       Springer-Verlag New York, Inc., New York, NY, USA.
    """
    return reversed(list(cuthill_mckee_ordering(G, heuristic=heuristic)))


def connected_cuthill_mckee_ordering(G, heuristic=None):
    # the cuthill mckee algorithm for connected graphs
    if heuristic is None:
        start = pseudo_peripheral_node(G)
    else:
        start = heuristic(G)
    visited = {start}
    queue = deque([start])
    while queue:
        parent = queue.popleft()
        yield parent
        nd = sorted(G.degree(set(G[parent]) - visited), key=itemgetter(1))
        children = [n for n, d in nd]
        visited.update(children)
        queue.extend(children)


def pseudo_peripheral_node(G):
    # helper for cuthill-mckee to find a node in a "pseudo peripheral pair"
    # to use as good starting node
    u = arbitrary_element(G)
    lp = 0
    v = u
    while True:
        spl = nx.shortest_path_length(G, v)
        l = max(spl.values())
        if l <= lp:
            break
        lp = l
        farthest = (n for n, dist in spl.items() if dist == l)
        v, deg = min(G.degree(farthest), key=itemgetter(1))
    return v
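

# A small sketch of the intended effect (assumes scipy is installed, since
# `nx.adjacency_matrix` returns a sparse array):
#
#     >>> import networkx as nx
#     >>> from networkx.utils import reverse_cuthill_mckee_ordering
#     >>> G = nx.gnm_random_graph(20, 40, seed=1)
#     >>> rcm = list(reverse_cuthill_mckee_ordering(G))
#     >>> A = nx.adjacency_matrix(G, nodelist=rcm)  # nonzeros cluster near the diagonal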
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1,11 @@
import pytest


def test_utils_namespace():
    """Ensure objects are not unintentionally exposed in utils namespace."""
    with pytest.raises(ImportError):
        from networkx.utils import nx
    with pytest.raises(ImportError):
        from networkx.utils import sys
    with pytest.raises(ImportError):
        from networkx.utils import defaultdict, deque
@ -0,0 +1,187 @@
import pickle

import pytest

import networkx as nx

sp = pytest.importorskip("scipy")
pytest.importorskip("numpy")


@nx._dispatchable(implemented_by_nx=False)
def _stub_func(G):
    raise NotImplementedError("_stub_func is a stub")


def test_dispatch_kwds_vs_args():
    G = nx.path_graph(4)
    nx.pagerank(G)
    nx.pagerank(G=G)
    with pytest.raises(TypeError):
        nx.pagerank()


def test_pickle():
    count = 0
    for name, func in nx.utils.backends._registered_algorithms.items():
        pickled = pickle.dumps(func.__wrapped__)
        assert pickle.loads(pickled) is func.__wrapped__
        try:
            # Some functions can't be pickled, but it's not b/c of _dispatchable
            pickled = pickle.dumps(func)
        except pickle.PicklingError:
            continue
        assert pickle.loads(pickled) is func
        count += 1
    assert count > 0
    assert pickle.loads(pickle.dumps(nx.inverse_line_graph)) is nx.inverse_line_graph


@pytest.mark.skipif(
    "not nx.config.backend_priority.algos "
    "or nx.config.backend_priority.algos[0] != 'nx_loopback'"
)
def test_graph_converter_needs_backend():
    # When testing, `nx.from_scipy_sparse_array` will *always* call the backend
    # implementation if it's implemented. If `backend=` isn't given, then the result
    # will be converted back to NetworkX via `convert_to_nx`.
    # If not testing, then calling `nx.from_scipy_sparse_array` w/o `backend=` will
    # always call the original version. `backend=` is *required* to call the backend.
    from networkx.classes.tests.dispatch_interface import (
        LoopbackBackendInterface,
        LoopbackGraph,
    )

    A = sp.sparse.coo_array([[0, 3, 2], [3, 0, 1], [2, 1, 0]])

    side_effects = []

    def from_scipy_sparse_array(self, *args, **kwargs):
        side_effects.append(1)  # Just to prove this was called
        return self.convert_from_nx(
            self.__getattr__("from_scipy_sparse_array")(*args, **kwargs),
            preserve_edge_attrs=True,
            preserve_node_attrs=True,
            preserve_graph_attrs=True,
        )

    @staticmethod
    def convert_to_nx(obj, *, name=None):
        if type(obj) is nx.Graph:
            return obj
        return nx.Graph(obj)

    # *This mutates LoopbackBackendInterface!*
    orig_convert_to_nx = LoopbackBackendInterface.convert_to_nx
    LoopbackBackendInterface.convert_to_nx = convert_to_nx
    LoopbackBackendInterface.from_scipy_sparse_array = from_scipy_sparse_array

    try:
        assert side_effects == []
        assert type(nx.from_scipy_sparse_array(A)) is nx.Graph
        assert side_effects == [1]
        assert (
            type(nx.from_scipy_sparse_array(A, backend="nx_loopback")) is LoopbackGraph
        )
        assert side_effects == [1, 1]
        # backend="networkx" is default implementation
        assert type(nx.from_scipy_sparse_array(A, backend="networkx")) is nx.Graph
        assert side_effects == [1, 1]
    finally:
        LoopbackBackendInterface.convert_to_nx = staticmethod(orig_convert_to_nx)
        del LoopbackBackendInterface.from_scipy_sparse_array
    with pytest.raises(ImportError, match="backend is not installed"):
        nx.from_scipy_sparse_array(A, backend="bad-backend-name")


@pytest.mark.skipif(
    "not nx.config.backend_priority.algos "
    "or nx.config.backend_priority.algos[0] != 'nx_loopback'"
)
def test_networkx_backend():
    """Test using `backend="networkx"` in a dispatchable function."""
    # (Implementing this test is harder than it should be)
    from networkx.classes.tests.dispatch_interface import (
        LoopbackBackendInterface,
        LoopbackGraph,
    )

    G = LoopbackGraph()
    G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4)])

    @staticmethod
    def convert_to_nx(obj, *, name=None):
        if isinstance(obj, LoopbackGraph):
            new_graph = nx.Graph()
            new_graph.__dict__.update(obj.__dict__)
            return new_graph
        return obj

    # *This mutates LoopbackBackendInterface!*
    # This uses the same trick as in the previous test.
    orig_convert_to_nx = LoopbackBackendInterface.convert_to_nx
    LoopbackBackendInterface.convert_to_nx = convert_to_nx
    try:
        G2 = nx.ego_graph(G, 0, backend="networkx")
        assert type(G2) is nx.Graph
    finally:
        LoopbackBackendInterface.convert_to_nx = staticmethod(orig_convert_to_nx)


def test_dispatchable_are_functions():
    assert type(nx.pagerank) is type(nx.pagerank.orig_func)


@pytest.mark.skipif("not nx.utils.backends.backends")
def test_mixing_backend_graphs():
    from networkx.classes.tests import dispatch_interface

    G = nx.Graph()
    G.add_edge(1, 2)
    G.add_edge(2, 3)
    H = nx.Graph()
    H.add_edge(2, 3)
    rv = nx.intersection(G, H)
    assert set(nx.intersection(G, H)) == {2, 3}
    G2 = dispatch_interface.convert(G)
    H2 = dispatch_interface.convert(H)
    if "nx_loopback" in nx.config.backend_priority:
        # Auto-convert
        assert set(nx.intersection(G2, H)) == {2, 3}
        assert set(nx.intersection(G, H2)) == {2, 3}
    elif not nx.config.backend_priority and "nx_loopback" not in nx.config.backends:
        # G2 and H2 are backend objects for a backend that is not registered!
        with pytest.raises(ImportError, match="backend is not installed"):
            nx.intersection(G2, H)
        with pytest.raises(ImportError, match="backend is not installed"):
            nx.intersection(G, H2)
    # It would be nice to test passing graphs from *different* backends,
    # but we are not set up to do this yet.


def test_bad_backend_name():
    """Using `backend=` raises with unknown backend even if there are no backends."""
    with pytest.raises(
        ImportError, match="'this_backend_does_not_exist' backend is not installed"
    ):
        nx.null_graph(backend="this_backend_does_not_exist")


def test_not_implemented_by_nx():
    assert "networkx" in nx.pagerank.backends
    assert "networkx" not in _stub_func.backends

    if "nx_loopback" in nx.config.backends:
        from networkx.classes.tests.dispatch_interface import LoopbackBackendInterface

        def stub_func_implementation(G):
            return True

        LoopbackBackendInterface._stub_func = staticmethod(stub_func_implementation)
        try:
            assert _stub_func(nx.Graph()) is True
        finally:
            del LoopbackBackendInterface._stub_func

    with pytest.raises(NotImplementedError):
        _stub_func(nx.Graph())
@ -0,0 +1,263 @@
import collections
import pickle

import pytest

import networkx as nx
from networkx.utils.configs import BackendPriorities, Config


# Define this at module level so we can test pickling
class ExampleConfig(Config):
    """Example configuration."""

    x: int
    y: str

    def _on_setattr(self, key, value):
        if key == "x" and value <= 0:
            raise ValueError("x must be positive")
        if key == "y" and not isinstance(value, str):
            raise TypeError("y must be a str")
        return value


class EmptyConfig(Config):
    pass


@pytest.mark.parametrize("cfg", [EmptyConfig(), Config()])
def test_config_empty(cfg):
    assert dir(cfg) == []
    with pytest.raises(AttributeError):
        cfg.x = 1
    with pytest.raises(KeyError):
        cfg["x"] = 1
    with pytest.raises(AttributeError):
        cfg.x
    with pytest.raises(KeyError):
        cfg["x"]
    assert len(cfg) == 0
    assert "x" not in cfg
    assert cfg == cfg
    assert cfg.get("x", 2) == 2
    assert set(cfg.keys()) == set()
    assert set(cfg.values()) == set()
    assert set(cfg.items()) == set()
    cfg2 = pickle.loads(pickle.dumps(cfg))
    assert cfg == cfg2
    assert isinstance(cfg, collections.abc.Collection)
    assert isinstance(cfg, collections.abc.Mapping)


def test_config_subclass():
    with pytest.raises(TypeError, match="missing 2 required keyword-only"):
        ExampleConfig()
    with pytest.raises(ValueError, match="x must be positive"):
        ExampleConfig(x=0, y="foo")
    with pytest.raises(TypeError, match="unexpected keyword"):
        ExampleConfig(x=1, y="foo", z="bad config")
    with pytest.raises(TypeError, match="unexpected keyword"):
        EmptyConfig(z="bad config")
    cfg = ExampleConfig(x=1, y="foo")
    assert cfg.x == 1
    assert cfg["x"] == 1
    assert cfg["y"] == "foo"
    assert cfg.y == "foo"
    assert "x" in cfg
    assert "y" in cfg
    assert "z" not in cfg
    assert len(cfg) == 2
    assert set(iter(cfg)) == {"x", "y"}
    assert set(cfg.keys()) == {"x", "y"}
    assert set(cfg.values()) == {1, "foo"}
    assert set(cfg.items()) == {("x", 1), ("y", "foo")}
    assert dir(cfg) == ["x", "y"]
    cfg.x = 2
    cfg["y"] = "bar"
    assert cfg["x"] == 2
    assert cfg.y == "bar"
    with pytest.raises(TypeError, match="can't be deleted"):
        del cfg.x
    with pytest.raises(TypeError, match="can't be deleted"):
        del cfg["y"]
    assert cfg.x == 2
    assert cfg == cfg
    assert cfg == ExampleConfig(x=2, y="bar")
    assert cfg != ExampleConfig(x=3, y="baz")
    assert cfg != Config(x=2, y="bar")
    with pytest.raises(TypeError, match="y must be a str"):
        cfg["y"] = 5
    with pytest.raises(ValueError, match="x must be positive"):
        cfg.x = -5
    assert cfg.get("x", 10) == 2
    with pytest.raises(AttributeError):
        cfg.z = 5
    with pytest.raises(KeyError):
        cfg["z"] = 5
    with pytest.raises(AttributeError):
        cfg.z
    with pytest.raises(KeyError):
        cfg["z"]
    cfg2 = pickle.loads(pickle.dumps(cfg))
    assert cfg == cfg2
    assert cfg.__doc__ == "Example configuration."
    assert cfg2.__doc__ == "Example configuration."


def test_config_defaults():
    class DefaultConfig(Config):
        x: int = 0
        y: int

    cfg = DefaultConfig(y=1)
    assert cfg.x == 0
    cfg = DefaultConfig(x=2, y=1)
    assert cfg.x == 2


def test_nxconfig():
    assert isinstance(nx.config.backend_priority, BackendPriorities)
    assert isinstance(nx.config.backend_priority.algos, list)
    assert isinstance(nx.config.backends, Config)
    with pytest.raises(TypeError, match="must be a list of backend names"):
        nx.config.backend_priority.algos = "nx_loopback"
    with pytest.raises(ValueError, match="Unknown backend when setting"):
        nx.config.backend_priority.algos = ["this_almost_certainly_is_not_a_backend"]
    with pytest.raises(TypeError, match="must be a Config of backend configs"):
        nx.config.backends = {}
    with pytest.raises(TypeError, match="must be a Config of backend configs"):
        nx.config.backends = Config(plausible_backend_name={})
    with pytest.raises(ValueError, match="Unknown backend when setting"):
        nx.config.backends = Config(this_almost_certainly_is_not_a_backend=Config())
    with pytest.raises(TypeError, match="must be True or False"):
        nx.config.cache_converted_graphs = "bad value"
    with pytest.raises(TypeError, match="must be a set of "):
        nx.config.warnings_to_ignore = 7
    with pytest.raises(ValueError, match="Unknown warning "):
        nx.config.warnings_to_ignore = {"bad value"}

    prev = nx.config.backend_priority
    try:
        nx.config.backend_priority = ["networkx"]
        assert isinstance(nx.config.backend_priority, BackendPriorities)
        assert nx.config.backend_priority.algos == ["networkx"]
    finally:
        nx.config.backend_priority = prev


def test_nxconfig_context():
    # We do some special handling so that `nx.config.backend_priority = val`
    # actually does `nx.config.backend_priority.algos = val`.
    orig = nx.config.backend_priority.algos
    val = [] if orig else ["networkx"]
    assert orig != val
    assert nx.config.backend_priority.algos != val
    with nx.config(backend_priority=val):
        assert nx.config.backend_priority.algos == val
    assert nx.config.backend_priority.algos == orig
    with nx.config.backend_priority(algos=val):
        assert nx.config.backend_priority.algos == val
    assert nx.config.backend_priority.algos == orig
    bad = ["bad-backend"]
    with pytest.raises(ValueError, match="Unknown backend"):
        nx.config.backend_priority = bad
    with pytest.raises(ValueError, match="Unknown backend"):
        with nx.config(backend_priority=bad):
            pass
    with pytest.raises(ValueError, match="Unknown backend"):
        with nx.config.backend_priority(algos=bad):
            pass


def test_not_strict():
    class FlexibleConfig(Config, strict=False):
        x: int

    cfg = FlexibleConfig(x=1)
    assert "_strict" not in cfg
    assert len(cfg) == 1
    assert list(cfg) == ["x"]
    assert list(cfg.keys()) == ["x"]
    assert list(cfg.values()) == [1]
    assert list(cfg.items()) == [("x", 1)]
    assert cfg.x == 1
    assert cfg["x"] == 1
    assert "x" in cfg
    assert hasattr(cfg, "x")
    assert "FlexibleConfig(x=1)" in repr(cfg)
    assert cfg == FlexibleConfig(x=1)
    del cfg.x
    assert "FlexibleConfig()" in repr(cfg)
    assert len(cfg) == 0
    assert not hasattr(cfg, "x")
    assert "x" not in cfg
    assert not hasattr(cfg, "y")
    assert "y" not in cfg
    cfg.y = 2
    assert len(cfg) == 1
    assert list(cfg) == ["y"]
    assert list(cfg.keys()) == ["y"]
    assert list(cfg.values()) == [2]
    assert list(cfg.items()) == [("y", 2)]
    assert cfg.y == 2
    assert cfg["y"] == 2
    assert hasattr(cfg, "y")
    assert "y" in cfg
    del cfg["y"]
    assert len(cfg) == 0
    assert list(cfg) == []
    with pytest.raises(AttributeError, match="y"):
        del cfg.y
    with pytest.raises(KeyError, match="y"):
        del cfg["y"]
    with pytest.raises(TypeError, match="missing 1 required keyword-only"):
        FlexibleConfig()
    # Be strict when first creating the config object
    with pytest.raises(TypeError, match="unexpected keyword argument 'y'"):
        FlexibleConfig(x=1, y=2)

    class FlexibleConfigWithDefault(Config, strict=False):
        x: int = 0

    assert FlexibleConfigWithDefault().x == 0
    assert FlexibleConfigWithDefault(x=1)["x"] == 1


def test_context():
    cfg = Config(x=1)
    with cfg(x=2) as c:
        assert c.x == 2
        c.x = 3
        assert cfg.x == 3
    assert cfg.x == 1

    with cfg(x=2) as c:
        assert c == cfg
        assert cfg.x == 2
        with cfg(x=3) as c2:
            assert c2 == cfg
            assert cfg.x == 3
            with pytest.raises(RuntimeError, match="context manager without"):
                with cfg as c3:  # Forgot to call `cfg(...)`
                    pass
            assert cfg.x == 3
        assert cfg.x == 2
    assert cfg.x == 1

    c = cfg(x=4)  # Not yet as context (not recommended, but possible)
    assert c == cfg
    assert cfg.x == 4
    # Cheat by looking at internal data; context stack should only grow with __enter__
    assert cfg._prev is not None
    assert cfg._context_stack == []
    with c:
        assert c == cfg
        assert cfg.x == 4
    assert cfg.x == 1
    # Cheat again; there was no preceding `cfg(...)` call this time
    assert cfg._prev is None
    with pytest.raises(RuntimeError, match="context manager without"):
        with cfg:
            pass
    assert cfg.x == 1
@ -0,0 +1,510 @@
import os
import pathlib
import random
import tempfile

import pytest

import networkx as nx
from networkx.utils.decorators import (
    argmap,
    not_implemented_for,
    np_random_state,
    open_file,
    py_random_state,
)
from networkx.utils.misc import PythonRandomInterface, PythonRandomViaNumpyBits


def test_not_implemented_decorator():
    @not_implemented_for("directed")
    def test_d(G):
        pass

    test_d(nx.Graph())
    with pytest.raises(nx.NetworkXNotImplemented):
        test_d(nx.DiGraph())

    @not_implemented_for("undirected")
    def test_u(G):
        pass

    test_u(nx.DiGraph())
    with pytest.raises(nx.NetworkXNotImplemented):
        test_u(nx.Graph())

    @not_implemented_for("multigraph")
    def test_m(G):
        pass

    test_m(nx.Graph())
    with pytest.raises(nx.NetworkXNotImplemented):
        test_m(nx.MultiGraph())

    @not_implemented_for("graph")
    def test_g(G):
        pass

    test_g(nx.MultiGraph())
    with pytest.raises(nx.NetworkXNotImplemented):
        test_g(nx.Graph())

    # not MultiDiGraph (multiple arguments => AND)
    @not_implemented_for("directed", "multigraph")
    def test_not_md(G):
        pass

    test_not_md(nx.Graph())
    test_not_md(nx.DiGraph())
    test_not_md(nx.MultiGraph())
    with pytest.raises(nx.NetworkXNotImplemented):
        test_not_md(nx.MultiDiGraph())

    # Graph only (multiple decorators => OR)
    @not_implemented_for("directed")
    @not_implemented_for("multigraph")
    def test_graph_only(G):
        pass

    test_graph_only(nx.Graph())
    with pytest.raises(nx.NetworkXNotImplemented):
        test_graph_only(nx.DiGraph())
    with pytest.raises(nx.NetworkXNotImplemented):
        test_graph_only(nx.MultiGraph())
    with pytest.raises(nx.NetworkXNotImplemented):
        test_graph_only(nx.MultiDiGraph())

    with pytest.raises(ValueError):
        not_implemented_for("directed", "undirected")

    with pytest.raises(ValueError):
        not_implemented_for("multigraph", "graph")


def test_not_implemented_decorator_key():
    with pytest.raises(KeyError):

        @not_implemented_for("foo")
        def test1(G):
            pass

        test1(nx.Graph())


def test_not_implemented_decorator_raise():
    with pytest.raises(nx.NetworkXNotImplemented):

        @not_implemented_for("graph")
        def test1(G):
            pass

        test1(nx.Graph())


class TestOpenFileDecorator:
    def setup_method(self):
        self.text = ["Blah... ", "BLAH ", "BLAH!!!!"]
        self.fobj = tempfile.NamedTemporaryFile("wb+", delete=False)
        self.name = self.fobj.name

    def teardown_method(self):
        self.fobj.close()
        os.unlink(self.name)

    def write(self, path):
        for text in self.text:
            path.write(text.encode("ascii"))

    @open_file(1, "r")
    def read(self, path):
        return path.readlines()[0]

    @staticmethod
    @open_file(0, "wb")
    def writer_arg0(path):
        path.write(b"demo")

    @open_file(1, "wb+")
    def writer_arg1(self, path):
        self.write(path)

    @open_file(2, "wb")
    def writer_arg2default(self, x, path=None):
        if path is None:
            with tempfile.NamedTemporaryFile("wb+") as fh:
                self.write(fh)
        else:
            self.write(path)

    @open_file(4, "wb")
    def writer_arg4default(self, x, y, other="hello", path=None, **kwargs):
        if path is None:
            with tempfile.NamedTemporaryFile("wb+") as fh:
                self.write(fh)
        else:
            self.write(path)

    @open_file("path", "wb")
    def writer_kwarg(self, **kwargs):
        path = kwargs.get("path", None)
        if path is None:
            with tempfile.NamedTemporaryFile("wb+") as fh:
                self.write(fh)
        else:
            self.write(path)

    def test_writer_arg0_str(self):
        self.writer_arg0(self.name)

    def test_writer_arg0_fobj(self):
        self.writer_arg0(self.fobj)

    def test_writer_arg0_pathlib(self):
        self.writer_arg0(pathlib.Path(self.name))

    def test_writer_arg1_str(self):
        self.writer_arg1(self.name)
        assert self.read(self.name) == "".join(self.text)

    def test_writer_arg1_fobj(self):
        self.writer_arg1(self.fobj)
        assert not self.fobj.closed
        self.fobj.close()
        assert self.read(self.name) == "".join(self.text)

    def test_writer_arg2default_str(self):
        self.writer_arg2default(0, path=None)
        self.writer_arg2default(0, path=self.name)
        assert self.read(self.name) == "".join(self.text)

    def test_writer_arg2default_fobj(self):
        self.writer_arg2default(0, path=self.fobj)
        assert not self.fobj.closed
        self.fobj.close()
        assert self.read(self.name) == "".join(self.text)

    def test_writer_arg2default_fobj_path_none(self):
        self.writer_arg2default(0, path=None)

    def test_writer_arg4default_fobj(self):
        self.writer_arg4default(0, 1, dog="dog", other="other")
        self.writer_arg4default(0, 1, dog="dog", other="other", path=self.name)
        assert self.read(self.name) == "".join(self.text)

    def test_writer_kwarg_str(self):
        self.writer_kwarg(path=self.name)
        assert self.read(self.name) == "".join(self.text)

    def test_writer_kwarg_fobj(self):
        self.writer_kwarg(path=self.fobj)
        self.fobj.close()
        assert self.read(self.name) == "".join(self.text)

    def test_writer_kwarg_path_none(self):
        self.writer_kwarg(path=None)


class TestRandomState:
    @classmethod
    def setup_class(cls):
        global np
        np = pytest.importorskip("numpy")

    @np_random_state(1)
    def instantiate_np_random_state(self, random_state):
        allowed = (np.random.RandomState, np.random.Generator)
        assert isinstance(random_state, allowed)
        return random_state.random()

    @py_random_state(1)
    def instantiate_py_random_state(self, random_state):
        allowed = (random.Random, PythonRandomInterface, PythonRandomViaNumpyBits)
        assert isinstance(random_state, allowed)
        return random_state.random()

    def test_random_state_None(self):
        np.random.seed(42)
        rv = np.random.random()
        np.random.seed(42)
        assert rv == self.instantiate_np_random_state(None)

        random.seed(42)
        rv = random.random()
        random.seed(42)
        assert rv == self.instantiate_py_random_state(None)

    def test_random_state_np_random(self):
        np.random.seed(42)
        rv = np.random.random()
        np.random.seed(42)
        assert rv == self.instantiate_np_random_state(np.random)
        np.random.seed(42)
        assert rv == self.instantiate_py_random_state(np.random)

    def test_random_state_int(self):
        np.random.seed(42)
        np_rv = np.random.random()
        random.seed(42)
        py_rv = random.random()

        np.random.seed(42)
        seed = 1
        rval = self.instantiate_np_random_state(seed)
        rval_expected = np.random.RandomState(seed).rand()
        assert rval == rval_expected
        # test that global seed wasn't changed in function
        assert np_rv == np.random.random()

        random.seed(42)
        rval = self.instantiate_py_random_state(seed)
        rval_expected = random.Random(seed).random()
        assert rval == rval_expected
        # test that global seed wasn't changed in function
        assert py_rv == random.random()

    def test_random_state_np_random_Generator(self):
        np.random.seed(42)
        np_rv = np.random.random()
        np.random.seed(42)
        seed = 1

        rng = np.random.default_rng(seed)
        rval = self.instantiate_np_random_state(rng)
        rval_expected = np.random.default_rng(seed).random()
        assert rval == rval_expected

        rval = self.instantiate_py_random_state(rng)
        rval_expected = np.random.default_rng(seed).random(size=2)[1]
        assert rval == rval_expected
        # test that global seed wasn't changed in function
        assert np_rv == np.random.random()

    def test_random_state_np_random_RandomState(self):
        np.random.seed(42)
        np_rv = np.random.random()
        np.random.seed(42)
        seed = 1

        rng = np.random.RandomState(seed)
        rval = self.instantiate_np_random_state(rng)
        rval_expected = np.random.RandomState(seed).random()
        assert rval == rval_expected

        rval = self.instantiate_py_random_state(rng)
        rval_expected = np.random.RandomState(seed).random(size=2)[1]
        assert rval == rval_expected
        # test that global seed wasn't changed in function
        assert np_rv == np.random.random()

    def test_random_state_py_random(self):
        seed = 1
        rng = random.Random(seed)
        rv = self.instantiate_py_random_state(rng)
        assert rv == random.Random(seed).random()

        pytest.raises(ValueError, self.instantiate_np_random_state, rng)


def test_random_state_string_arg_index():
    with pytest.raises(nx.NetworkXError):

        @np_random_state("a")
        def make_random_state(rs):
            pass

        rstate = make_random_state(1)


def test_py_random_state_string_arg_index():
    with pytest.raises(nx.NetworkXError):

        @py_random_state("a")
        def make_random_state(rs):
            pass

        rstate = make_random_state(1)


def test_random_state_invalid_arg_index():
    with pytest.raises(nx.NetworkXError):

        @np_random_state(2)
        def make_random_state(rs):
            pass

        rstate = make_random_state(1)


def test_py_random_state_invalid_arg_index():
    with pytest.raises(nx.NetworkXError):

        @py_random_state(2)
        def make_random_state(rs):
            pass

        rstate = make_random_state(1)


class TestArgmap:
    class ArgmapError(RuntimeError):
        pass

    def test_trivial_function(self):
        def do_not_call(x):
            raise ArgmapError("do not call this function")

        @argmap(do_not_call)
        def trivial_argmap():
            return 1

        assert trivial_argmap() == 1

    def test_trivial_iterator(self):
        def do_not_call(x):
            raise ArgmapError("do not call this function")

        @argmap(do_not_call)
        def trivial_argmap():
            yield from (1, 2, 3)

        assert tuple(trivial_argmap()) == (1, 2, 3)

    def test_contextmanager(self):
        container = []

        def contextmanager(x):
            nonlocal container
            return x, lambda: container.append(x)

        @argmap(contextmanager, 0, 1, 2, try_finally=True)
        def foo(x, y, z):
            return x, y, z

        x, y, z = foo("a", "b", "c")

        # context exits are called in reverse
        assert container == ["c", "b", "a"]

    def test_tryfinally_generator(self):
        container = []

        def singleton(x):
            return (x,)

        with pytest.raises(nx.NetworkXError):

            @argmap(singleton, 0, 1, 2, try_finally=True)
            def foo(x, y, z):
                yield from (x, y, z)

        @argmap(singleton, 0, 1, 2)
        def foo(x, y, z):
            return x + y + z

        q = foo("a", "b", "c")

        assert q == ("a", "b", "c")

    def test_actual_vararg(self):
        @argmap(lambda x: -x, 4)
        def foo(x, y, *args):
            return (x, y) + tuple(args)

        assert foo(1, 2, 3, 4, 5, 6) == (1, 2, 3, 4, -5, 6)

    def test_signature_destroying_intermediate_decorator(self):
        def add_one_to_first_bad_decorator(f):
            """Bad because it doesn't wrap the f signature (clobbers it)"""

            def decorated(a, *args, **kwargs):
                return f(a + 1, *args, **kwargs)

            return decorated

        add_two_to_second = argmap(lambda b: b + 2, 1)

        @add_two_to_second
        @add_one_to_first_bad_decorator
        def add_one_and_two(a, b):
            return a, b

        assert add_one_and_two(5, 5) == (6, 7)

    def test_actual_kwarg(self):
        @argmap(lambda x: -x, "arg")
        def foo(*, arg):
            return arg

        assert foo(arg=3) == -3

    def test_nested_tuple(self):
        def xform(x, y):
            u, v = y
            return x + u + v, (x + u, x + v)

        # we're testing args and kwargs here, too
        @argmap(xform, (0, ("t", 2)))
        def foo(a, *args, **kwargs):
            return a, args, kwargs

        a, args, kwargs = foo(1, 2, 3, t=4)

        assert a == 1 + 4 + 3
        assert args == (2, 1 + 3)
        assert kwargs == {"t": 1 + 4}

    def test_flatten(self):
        assert tuple(argmap._flatten([[[[[], []], [], []], [], [], []]], set())) == ()

        rlist = ["a", ["b", "c"], [["d"], "e"], "f"]
        assert "".join(argmap._flatten(rlist, set())) == "abcdef"

    def test_indent(self):
        code = "\n".join(
            argmap._indent(
                *[
                    "try:",
                    "try:",
                    "pass#",
                    "finally:",
                    "pass#",
                    "#",
                    "finally:",
                    "pass#",
                ]
            )
        )
        assert (
            code
            == """try:
 try:
  pass#
 finally:
  pass#
 #
finally:
 pass#"""
        )

    def test_immediate_raise(self):
        @not_implemented_for("directed")
        def yield_nodes(G):
            yield from G

        G = nx.Graph([(1, 2)])
        D = nx.DiGraph()

        # test first call (argmap is compiled and executed)
        with pytest.raises(nx.NetworkXNotImplemented):
            node_iter = yield_nodes(D)

        # test second call (argmap is only executed)
        with pytest.raises(nx.NetworkXNotImplemented):
            node_iter = yield_nodes(D)

        # ensure that generators still make generators
        node_iter = yield_nodes(G)
        next(node_iter)
        next(node_iter)
        with pytest.raises(StopIteration):
            next(node_iter)
@ -0,0 +1,131 @@
import pytest

import networkx as nx
from networkx.utils import BinaryHeap, PairingHeap


class X:
    def __eq__(self, other):
        raise self is other

    def __ne__(self, other):
        raise self is not other

    def __lt__(self, other):
        raise TypeError("cannot compare")

    def __le__(self, other):
        raise TypeError("cannot compare")

    def __ge__(self, other):
        raise TypeError("cannot compare")

    def __gt__(self, other):
        raise TypeError("cannot compare")

    def __hash__(self):
        return hash(id(self))


x = X()


data = [  # min should not invent an element.
    ("min", nx.NetworkXError),
    # Popping an empty heap should fail.
    ("pop", nx.NetworkXError),
    # Getting nonexisting elements should return None.
    ("get", 0, None),
    ("get", x, None),
    ("get", None, None),
    # Inserting a new key should succeed.
    ("insert", x, 1, True),
    ("get", x, 1),
    ("min", (x, 1)),
    # min should not pop the top element.
    ("min", (x, 1)),
    # Inserting a new key of different type should succeed.
    ("insert", 1, -2.0, True),
    # int and float values should interop.
    ("min", (1, -2.0)),
    # pop removes minimum-valued element.
    ("insert", 3, -(10**100), True),
    ("insert", 4, 5, True),
    ("pop", (3, -(10**100))),
    ("pop", (1, -2.0)),
    # Decrease-insert should succeed.
    ("insert", 4, -50, True),
    ("insert", 4, -60, False, True),
    # Decrease-insert should not create duplicate keys.
    ("pop", (4, -60)),
    ("pop", (x, 1)),
    # Popping all elements should empty the heap.
    ("min", nx.NetworkXError),
    ("pop", nx.NetworkXError),
    # Non-value-changing insert should fail.
    ("insert", x, 0, True),
    ("insert", x, 0, False, False),
    ("min", (x, 0)),
    ("insert", x, 0, True, False),
    ("min", (x, 0)),
    # Failed insert should not create duplicate keys.
    ("pop", (x, 0)),
    ("pop", nx.NetworkXError),
    # Increase-insert should succeed when allowed.
    ("insert", None, 0, True),
    ("insert", 2, -1, True),
    ("min", (2, -1)),
    ("insert", 2, 1, True, False),
    ("min", (None, 0)),
    # Increase-insert should fail when disallowed.
    ("insert", None, 2, False, False),
    ("min", (None, 0)),
    # Failed increase-insert should not create duplicate keys.
    ("pop", (None, 0)),
    ("pop", (2, 1)),
    ("min", nx.NetworkXError),
    ("pop", nx.NetworkXError),
]


def _test_heap_class(cls, *args, **kwargs):
    heap = cls(*args, **kwargs)
    # Basic behavioral test
    for op in data:
        if op[-1] is not nx.NetworkXError:
            assert op[-1] == getattr(heap, op[0])(*op[1:-1])
        else:
            pytest.raises(op[-1], getattr(heap, op[0]), *op[1:-1])
    # Coverage test.
    for i in range(99, -1, -1):
        assert heap.insert(i, i)
    for i in range(50):
        assert heap.pop() == (i, i)
    for i in range(100):
        assert heap.insert(i, i) == (i < 50)
    for i in range(100):
        assert not heap.insert(i, i + 1)
    for i in range(50):
        assert heap.pop() == (i, i)
    for i in range(100):
        assert heap.insert(i, i + 1) == (i < 50)
    for i in range(49):
        assert heap.pop() == (i, i + 1)
    assert sorted([heap.pop(), heap.pop()]) == [(49, 50), (50, 50)]
    for i in range(51, 100):
        assert not heap.insert(i, i + 1, True)
    for i in range(51, 70):
        assert heap.pop() == (i, i + 1)
    for i in range(100):
        assert heap.insert(i, i)
    for i in range(100):
        assert heap.pop() == (i, i)
    pytest.raises(nx.NetworkXError, heap.pop)


def test_PairingHeap():
    _test_heap_class(PairingHeap)


def test_BinaryHeap():
    _test_heap_class(BinaryHeap)
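

# For orientation, the heap API exercised above, as a sketch (not a test):
#
#     >>> from networkx.utils import BinaryHeap
#     >>> heap = BinaryHeap()
#     >>> heap.insert("a", 3)   # True: new key
#     >>> heap.insert("a", 1)   # True: value decreased
#     >>> heap.min()            # ("a", 1), leaves the heap unchanged
#     >>> heap.pop()            # ("a", 1), removes it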
@ -0,0 +1,268 @@
import pytest

from networkx.utils.mapped_queue import MappedQueue, _HeapElement


def test_HeapElement_gtlt():
    bar = _HeapElement(1.1, "a")
    foo = _HeapElement(1, "b")
    assert foo < bar
    assert bar > foo
    assert foo < 1.1
    assert 1 < bar


def test_HeapElement_gtlt_tied_priority():
    bar = _HeapElement(1, "a")
    foo = _HeapElement(1, "b")
    assert foo > bar
    assert bar < foo


def test_HeapElement_eq():
    bar = _HeapElement(1.1, "a")
    foo = _HeapElement(1, "a")
    assert foo == bar
    assert bar == foo
    assert foo == "a"


def test_HeapElement_iter():
    foo = _HeapElement(1, "a")
    bar = _HeapElement(1.1, (3, 2, 1))
    assert list(foo) == [1, "a"]
    assert list(bar) == [1.1, 3, 2, 1]


def test_HeapElement_getitem():
    foo = _HeapElement(1, "a")
    bar = _HeapElement(1.1, (3, 2, 1))
    assert foo[1] == "a"
    assert foo[0] == 1
    assert bar[0] == 1.1
    assert bar[2] == 2
    assert bar[3] == 1
    pytest.raises(IndexError, bar.__getitem__, 4)
    pytest.raises(IndexError, foo.__getitem__, 2)


class TestMappedQueue:
    def setup_method(self):
        pass

    def _check_map(self, q):
        assert q.position == {elt: pos for pos, elt in enumerate(q.heap)}

    def _make_mapped_queue(self, h):
        q = MappedQueue()
        q.heap = h
        q.position = {elt: pos for pos, elt in enumerate(h)}
        return q

    def test_heapify(self):
        h = [5, 4, 3, 2, 1, 0]
        q = self._make_mapped_queue(h)
        q._heapify()
        self._check_map(q)

    def test_init(self):
        h = [5, 4, 3, 2, 1, 0]
        q = MappedQueue(h)
        self._check_map(q)

    def test_incomparable(self):
        h = [5, 4, "a", 2, 1, 0]
        pytest.raises(TypeError, MappedQueue, h)

    def test_len(self):
        h = [5, 4, 3, 2, 1, 0]
        q = MappedQueue(h)
        self._check_map(q)
        assert len(q) == 6

    def test_siftup_leaf(self):
        h = [2]
        h_sifted = [2]
        q = self._make_mapped_queue(h)
        q._siftup(0)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftup_one_child(self):
        h = [2, 0]
        h_sifted = [0, 2]
        q = self._make_mapped_queue(h)
        q._siftup(0)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftup_left_child(self):
        h = [2, 0, 1]
        h_sifted = [0, 2, 1]
        q = self._make_mapped_queue(h)
        q._siftup(0)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftup_right_child(self):
        h = [2, 1, 0]
        h_sifted = [0, 1, 2]
        q = self._make_mapped_queue(h)
        q._siftup(0)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftup_multiple(self):
        h = [0, 1, 2, 4, 3, 5, 6]
        h_sifted = [0, 1, 2, 4, 3, 5, 6]
        q = self._make_mapped_queue(h)
        q._siftup(0)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftdown_leaf(self):
        h = [2]
        h_sifted = [2]
        q = self._make_mapped_queue(h)
        q._siftdown(0, 0)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftdown_single(self):
        h = [1, 0]
        h_sifted = [0, 1]
        q = self._make_mapped_queue(h)
        q._siftdown(0, len(h) - 1)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_siftdown_multiple(self):
        h = [1, 2, 3, 4, 5, 6, 7, 0]
        h_sifted = [0, 1, 3, 2, 5, 6, 7, 4]
        q = self._make_mapped_queue(h)
        q._siftdown(0, len(h) - 1)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_push(self):
        to_push = [6, 1, 4, 3, 2, 5, 0]
        h_sifted = [0, 2, 1, 6, 3, 5, 4]
        q = MappedQueue()
        for elt in to_push:
            q.push(elt)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_push_duplicate(self):
        to_push = [2, 1, 0]
        h_sifted = [0, 2, 1]
        q = MappedQueue()
        for elt in to_push:
            inserted = q.push(elt)
            assert inserted
        assert q.heap == h_sifted
        self._check_map(q)
        inserted = q.push(1)
        assert not inserted

    def test_pop(self):
        h = [3, 4, 6, 0, 1, 2, 5]
        h_sorted = sorted(h)
        q = self._make_mapped_queue(h)
        q._heapify()
        popped = [q.pop() for _ in range(len(h))]
        assert popped == h_sorted
        self._check_map(q)

    def test_remove_leaf(self):
        h = [0, 2, 1, 6, 3, 5, 4]
        h_removed = [0, 2, 1, 6, 4, 5]
        q = self._make_mapped_queue(h)
        removed = q.remove(3)
        assert q.heap == h_removed

    def test_remove_root(self):
        h = [0, 2, 1, 6, 3, 5, 4]
        h_removed = [1, 2, 4, 6, 3, 5]
        q = self._make_mapped_queue(h)
        removed = q.remove(0)
        assert q.heap == h_removed

    def test_update_leaf(self):
        h = [0, 20, 10, 60, 30, 50, 40]
        h_updated = [0, 15, 10, 60, 20, 50, 40]
        q = self._make_mapped_queue(h)
        removed = q.update(30, 15)
        assert q.heap == h_updated

    def test_update_root(self):
        h = [0, 20, 10, 60, 30, 50, 40]
        h_updated = [10, 20, 35, 60, 30, 50, 40]
        q = self._make_mapped_queue(h)
        removed = q.update(0, 35)
        assert q.heap == h_updated


class TestMappedDict(TestMappedQueue):
    def _make_mapped_queue(self, h):
        priority_dict = {elt: elt for elt in h}
        return MappedQueue(priority_dict)

    def test_init(self):
        d = {5: 0, 4: 1, "a": 2, 2: 3, 1: 4}
        q = MappedQueue(d)
        assert q.position == d

    def test_ties(self):
        d = {5: 0, 4: 1, 3: 2, 2: 3, 1: 4}
        q = MappedQueue(d)
        assert q.position == {elt: pos for pos, elt in enumerate(q.heap)}

    def test_pop(self):
        d = {5: 0, 4: 1, 3: 2, 2: 3, 1: 4}
        q = MappedQueue(d)
        assert q.pop() == _HeapElement(0, 5)
        assert q.position == {elt: pos for pos, elt in enumerate(q.heap)}

    def test_empty_pop(self):
        q = MappedQueue()
        pytest.raises(IndexError, q.pop)

    def test_incomparable_ties(self):
        d = {5: 0, 4: 0, "a": 0, 2: 0, 1: 0}
        pytest.raises(TypeError, MappedQueue, d)

    def test_push(self):
        to_push = [6, 1, 4, 3, 2, 5, 0]
        h_sifted = [0, 2, 1, 6, 3, 5, 4]
        q = MappedQueue()
        for elt in to_push:
            q.push(elt, priority=elt)
        assert q.heap == h_sifted
        self._check_map(q)

    def test_push_duplicate(self):
        to_push = [2, 1, 0]
        h_sifted = [0, 2, 1]
        q = MappedQueue()
        for elt in to_push:
            inserted = q.push(elt, priority=elt)
            assert inserted
        assert q.heap == h_sifted
        self._check_map(q)
        inserted = q.push(1, priority=1)
        assert not inserted

    def test_update_leaf(self):
        h = [0, 20, 10, 60, 30, 50, 40]
        h_updated = [0, 15, 10, 60, 20, 50, 40]
        q = self._make_mapped_queue(h)
        removed = q.update(30, 15, priority=15)
        assert q.heap == h_updated

    def test_update_root(self):
        h = [0, 20, 10, 60, 30, 50, 40]
        h_updated = [10, 20, 35, 60, 30, 50, 40]
        q = self._make_mapped_queue(h)
        removed = q.update(0, 35, priority=35)
        assert q.heap == h_updated
@ -0,0 +1,268 @@
|
|||
import random
from copy import copy

import pytest

import networkx as nx
from networkx.utils import (
    PythonRandomInterface,
    PythonRandomViaNumpyBits,
    arbitrary_element,
    create_py_random_state,
    create_random_state,
    dict_to_numpy_array,
    discrete_sequence,
    flatten,
    groups,
    make_list_of_ints,
    pairwise,
    powerlaw_sequence,
)
from networkx.utils.misc import _dict_to_numpy_array1, _dict_to_numpy_array2

nested_depth = (
    1,
    2,
    (3, 4, ((5, 6, (7,), (8, (9, 10), 11), (12, 13, (14, 15)), 16), 17), 18, 19),
    20,
)

nested_set = {
    (1, 2, 3, 4),
    (5, 6, 7, 8, 9),
    (10, 11, (12, 13, 14), (15, 16, 17, 18)),
    19,
    20,
}

nested_mixed = [
    1,
    (2, 3, {4, (5, 6), 7}, [8, 9]),
    {10: "foo", 11: "bar", (12, 13): "baz"},
    {(14, 15): "qwe", 16: "asd"},
    (17, (18, "19"), 20),
]


@pytest.mark.parametrize("result", [None, [], ["existing"], ["existing1", "existing2"]])
@pytest.mark.parametrize("nested", [nested_depth, nested_mixed, nested_set])
def test_flatten(nested, result):
    if result is None:
        val = flatten(nested, result)
        assert len(val) == 20
    else:
        _result = copy(result)  # because pytest passes parameters as is
        nexisting = len(_result)
        val = flatten(nested, _result)
        assert len(val) == len(_result) == 20 + nexisting

    assert issubclass(type(val), tuple)
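
The parametrized test above fixes the `flatten` contract: arbitrarily nested containers collapse into a flat tuple, and an optional list argument accumulates items in place. A minimal sketch consistent with those assertions:

from networkx.utils import flatten

# Nested tuples/lists flatten into a single tuple.
assert flatten((1, (2, (3, 4)), [5])) == (1, 2, 3, 4, 5)

# An existing list accumulates the flattened items in place;
# sets have arbitrary order, so compare as sets.
acc = ["existing"]
out = flatten({6, (7, 8)}, acc)
assert set(out) == {"existing", 6, 7, 8} and len(acc) == 4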
def test_make_list_of_ints():
    mylist = [1, 2, 3.0, 42, -2]
    assert make_list_of_ints(mylist) is mylist
    assert make_list_of_ints(mylist) == mylist
    assert isinstance(make_list_of_ints(mylist)[2], int)
    pytest.raises(nx.NetworkXError, make_list_of_ints, [1, 2, 3, "kermit"])
    pytest.raises(nx.NetworkXError, make_list_of_ints, [1, 2, 3.1])


def test_random_number_distribution():
    # smoke test only
    z = powerlaw_sequence(20, exponent=2.5)
    z = discrete_sequence(20, distribution=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3])
class TestNumpyArray:
    @classmethod
    def setup_class(cls):
        global np
        np = pytest.importorskip("numpy")

    def test_numpy_to_list_of_ints(self):
        a = np.array([1, 2, 3], dtype=np.int64)
        b = np.array([1.0, 2, 3])
        c = np.array([1.1, 2, 3])
        assert isinstance(make_list_of_ints(a), list)
        assert make_list_of_ints(b) == list(b)
        B = make_list_of_ints(b)
        assert isinstance(B[0], int)
        pytest.raises(nx.NetworkXError, make_list_of_ints, c)

    def test__dict_to_numpy_array1(self):
        d = {"a": 1, "b": 2}
        a = _dict_to_numpy_array1(d, mapping={"a": 0, "b": 1})
        np.testing.assert_allclose(a, np.array([1, 2]))
        a = _dict_to_numpy_array1(d, mapping={"b": 0, "a": 1})
        np.testing.assert_allclose(a, np.array([2, 1]))

        a = _dict_to_numpy_array1(d)
        np.testing.assert_allclose(a.sum(), 3)

    def test__dict_to_numpy_array2(self):
        d = {"a": {"a": 1, "b": 2}, "b": {"a": 10, "b": 20}}

        mapping = {"a": 1, "b": 0}
        a = _dict_to_numpy_array2(d, mapping=mapping)
        np.testing.assert_allclose(a, np.array([[20, 10], [2, 1]]))

        a = _dict_to_numpy_array2(d)
        np.testing.assert_allclose(a.sum(), 33)

    def test_dict_to_numpy_array_a(self):
        d = {"a": {"a": 1, "b": 2}, "b": {"a": 10, "b": 20}}

        mapping = {"a": 0, "b": 1}
        a = dict_to_numpy_array(d, mapping=mapping)
        np.testing.assert_allclose(a, np.array([[1, 2], [10, 20]]))

        mapping = {"a": 1, "b": 0}
        a = dict_to_numpy_array(d, mapping=mapping)
        np.testing.assert_allclose(a, np.array([[20, 10], [2, 1]]))

        a = _dict_to_numpy_array2(d)
        np.testing.assert_allclose(a.sum(), 33)

    def test_dict_to_numpy_array_b(self):
        d = {"a": 1, "b": 2}

        mapping = {"a": 0, "b": 1}
        a = dict_to_numpy_array(d, mapping=mapping)
        np.testing.assert_allclose(a, np.array([1, 2]))

        a = _dict_to_numpy_array1(d)
        np.testing.assert_allclose(a.sum(), 3)
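
As the class above shows, `dict_to_numpy_array` dispatches on its input: a flat dict becomes a vector, a dict-of-dicts becomes a matrix, and the optional `mapping` pins each key to a row/column index. A small sketch mirroring the tested assertions (numpy assumed available):

import numpy as np

from networkx.utils import dict_to_numpy_array

# 1D: a flat dict becomes a vector ordered by the mapping.
v = dict_to_numpy_array({"a": 1, "b": 2}, mapping={"a": 0, "b": 1})
assert (v == np.array([1.0, 2.0])).all()

# 2D: a dict-of-dicts becomes a matrix; index i holds the key mapped to i.
m = dict_to_numpy_array(
    {"a": {"a": 1, "b": 2}, "b": {"a": 10, "b": 20}}, mapping={"a": 0, "b": 1}
)
assert (m == np.array([[1.0, 2.0], [10.0, 20.0]])).all()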
def test_pairwise():
    nodes = range(4)
    node_pairs = [(0, 1), (1, 2), (2, 3)]
    node_pairs_cycle = node_pairs + [(3, 0)]
    assert list(pairwise(nodes)) == node_pairs
    assert list(pairwise(iter(nodes))) == node_pairs
    assert list(pairwise(nodes, cyclic=True)) == node_pairs_cycle
    empty_iter = iter(())
    assert list(pairwise(empty_iter)) == []
    empty_iter = iter(())
    assert list(pairwise(empty_iter, cyclic=True)) == []


def test_groups():
    many_to_one = dict(zip("abcde", [0, 0, 1, 1, 2]))
    actual = groups(many_to_one)
    expected = {0: {"a", "b"}, 1: {"c", "d"}, 2: {"e"}}
    assert actual == expected
    assert {} == groups({})
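
Both helpers are small building blocks: `pairwise` yields consecutive pairs (optionally closing the cycle), and `groups` inverts a many-to-one mapping into a dict of sets. A quick sketch of how they are typically combined with graph code:

from networkx.utils import groups, pairwise

path = ["u", "v", "w"]
edges = list(pairwise(path))  # [("u", "v"), ("v", "w")]
cycle = list(pairwise(path, cyclic=True))  # adds the closing ("w", "u")

color_of = {"u": 0, "v": 0, "w": 1}
by_color = groups(color_of)  # {0: {"u", "v"}, 1: {"w"}}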
def test_create_random_state():
    np = pytest.importorskip("numpy")
    rs = np.random.RandomState

    assert isinstance(create_random_state(1), rs)
    assert isinstance(create_random_state(None), rs)
    assert isinstance(create_random_state(np.random), rs)
    assert isinstance(create_random_state(rs(1)), rs)
    # Support for numpy.random.Generator
    rng = np.random.default_rng()
    assert isinstance(create_random_state(rng), np.random.Generator)
    pytest.raises(ValueError, create_random_state, "a")

    assert np.all(rs(1).rand(10) == create_random_state(1).rand(10))


def test_create_py_random_state():
    pyrs = random.Random

    assert isinstance(create_py_random_state(1), pyrs)
    assert isinstance(create_py_random_state(None), pyrs)
    assert isinstance(create_py_random_state(pyrs(1)), pyrs)
    pytest.raises(ValueError, create_py_random_state, "a")

    np = pytest.importorskip("numpy")

    rs = np.random.RandomState
    rng = np.random.default_rng(1000)
    rng_explicit = np.random.Generator(np.random.SFC64())
    old_nprs = PythonRandomInterface
    nprs = PythonRandomViaNumpyBits
    assert isinstance(create_py_random_state(np.random), nprs)
    assert isinstance(create_py_random_state(rs(1)), old_nprs)
    assert isinstance(create_py_random_state(rng), nprs)
    assert isinstance(create_py_random_state(rng_explicit), nprs)
    # test default rng input
    assert isinstance(PythonRandomInterface(), old_nprs)
    assert isinstance(PythonRandomViaNumpyBits(), nprs)

    # Smoke test with very large integers (they raise an error for np.random)
    int64max = 9223372036854775807  # from np.iinfo(np.int64).max
    for r in (rng, rs(1)):
        prs = create_py_random_state(r)
        prs.randrange(3, int64max + 5)
        prs.randint(3, int64max + 5)
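
The practical point of these factories is that one `seed`-style argument can be an int, `None`, a `random.Random`, or a numpy RNG, and the caller always gets back an object with the Python `random.Random` API; numpy sources are wrapped in the interface classes checked above. A minimal sketch under those assumptions:

import random

import numpy as np

from networkx.utils import create_py_random_state

# All of these yield an object usable like random.Random.
for seed in (None, 42, random.Random(42), np.random.default_rng(42)):
    rng = create_py_random_state(seed)
    rng.randrange(10)
    rng.shuffle(list(range(5)))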
def test_PythonRandomInterface_RandomState():
    np = pytest.importorskip("numpy")

    seed = 42
    rs = np.random.RandomState
    rng = PythonRandomInterface(rs(seed))
    rs42 = rs(seed)

    # make sure these functions give the same results as the wrapped generator
    assert rng.randrange(3, 5) == rs42.randint(3, 5)
    assert rng.choice([1, 2, 3]) == rs42.choice([1, 2, 3])
    assert rng.gauss(0, 1) == rs42.normal(0, 1)
    assert rng.expovariate(1.5) == rs42.exponential(1 / 1.5)
    assert np.all(rng.shuffle([1, 2, 3]) == rs42.shuffle([1, 2, 3]))
    assert np.all(
        rng.sample([1, 2, 3], 2) == rs42.choice([1, 2, 3], (2,), replace=False)
    )
    assert np.all(
        [rng.randint(3, 5) for _ in range(100)]
        == [rs42.randint(3, 6) for _ in range(100)]
    )
    assert rng.random() == rs42.random_sample()


def test_PythonRandomInterface_Generator():
    np = pytest.importorskip("numpy")

    seed = 42
    rng = np.random.default_rng(seed)
    pri = PythonRandomInterface(np.random.default_rng(seed))

    # make sure these functions give the same results as the wrapped generator
    assert pri.randrange(3, 5) == rng.integers(3, 5)
    assert pri.choice([1, 2, 3]) == rng.choice([1, 2, 3])
    assert pri.gauss(0, 1) == rng.normal(0, 1)
    assert pri.expovariate(1.5) == rng.exponential(1 / 1.5)
    assert np.all(pri.shuffle([1, 2, 3]) == rng.shuffle([1, 2, 3]))
    assert np.all(
        pri.sample([1, 2, 3], 2) == rng.choice([1, 2, 3], (2,), replace=False)
    )
    assert np.all(
        [pri.randint(3, 5) for _ in range(100)]
        == [rng.integers(3, 6) for _ in range(100)]
    )
    assert pri.random() == rng.random()
@pytest.mark.parametrize(
    ("iterable_type", "expected"), ((list, 1), (tuple, 1), (str, "["), (set, 1))
)
def test_arbitrary_element(iterable_type, expected):
    iterable = iterable_type([1, 2, 3])
    assert arbitrary_element(iterable) == expected


@pytest.mark.parametrize(
    "iterator",
    ((i for i in range(3)), iter([1, 2, 3])),  # generator and iterator inputs
)
def test_arbitrary_element_raises(iterator):
    """Value error is raised when input is an iterator."""
    with pytest.raises(ValueError, match="from an iterator"):
        arbitrary_element(iterator)
@@ -0,0 +1,38 @@
import pytest

from networkx.utils import (
    powerlaw_sequence,
    random_weighted_sample,
    weighted_choice,
    zipf_rv,
)


def test_degree_sequences():
    seq = powerlaw_sequence(10, seed=1)
    seq = powerlaw_sequence(10)
    assert len(seq) == 10


def test_zipf_rv():
    r = zipf_rv(2.3, xmin=2, seed=1)
    r = zipf_rv(2.3, 2, 1)
    r = zipf_rv(2.3)
    assert isinstance(r, int)
    pytest.raises(ValueError, zipf_rv, 0.5)
    pytest.raises(ValueError, zipf_rv, 2, xmin=0)


def test_random_weighted_sample():
    mapping = {"a": 10, "b": 20}
    s = random_weighted_sample(mapping, 2, seed=1)
    s = random_weighted_sample(mapping, 2)
    assert sorted(s) == sorted(mapping.keys())
    pytest.raises(ValueError, random_weighted_sample, mapping, 3)


def test_random_weighted_choice():
    mapping = {"a": 10, "b": 0}
    c = weighted_choice(mapping, seed=1)
    c = weighted_choice(mapping)
    assert c == "a"
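
These generators are typically used to build degree sequences for random-graph models. A small sketch, assuming only the seeded call signatures exercised above:

from networkx.utils import powerlaw_sequence, zipf_rv

# 100 floats drawn from a power-law distribution (default exponent 2.0).
seq = powerlaw_sequence(100, seed=42)

# A single Zipf-distributed integer with exponent 2.3 and minimum value 2.
r = zipf_rv(2.3, xmin=2, seed=42)
assert isinstance(r, int) and r >= 2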
@@ -0,0 +1,63 @@
import networkx as nx
from networkx.utils import reverse_cuthill_mckee_ordering


def test_reverse_cuthill_mckee():
    # example graph from
    # http://www.boost.org/doc/libs/1_37_0/libs/graph/example/cuthill_mckee_ordering.cpp
    G = nx.Graph(
        [
            (0, 3),
            (0, 5),
            (1, 2),
            (1, 4),
            (1, 6),
            (1, 9),
            (2, 3),
            (2, 4),
            (3, 5),
            (3, 8),
            (4, 6),
            (5, 6),
            (5, 7),
            (6, 7),
        ]
    )
    rcm = list(reverse_cuthill_mckee_ordering(G))
    assert rcm in [[0, 8, 5, 7, 3, 6, 2, 4, 1, 9], [0, 8, 5, 7, 3, 6, 4, 2, 1, 9]]


def test_rcm_alternate_heuristic():
    # example from
    G = nx.Graph(
        [
            (0, 0),
            (0, 4),
            (1, 1),
            (1, 2),
            (1, 5),
            (1, 7),
            (2, 2),
            (2, 4),
            (3, 3),
            (3, 6),
            (4, 4),
            (5, 5),
            (5, 7),
            (6, 6),
            (7, 7),
        ]
    )

    answers = [
        [6, 3, 5, 7, 1, 2, 4, 0],
        [6, 3, 7, 5, 1, 2, 4, 0],
        [7, 5, 1, 2, 4, 0, 6, 3],
    ]

    def smallest_degree(G):
        deg, node = min((d, n) for n, d in G.degree())
        return node

    rcm = list(reverse_cuthill_mckee_ordering(G, heuristic=smallest_degree))
    assert rcm in answers
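
Reverse Cuthill-McKee is mostly used to reduce the bandwidth of a sparse matrix: relabeling rows and columns by the returned node ordering clusters the nonzeros near the diagonal. A minimal sketch, assuming scipy is available for the sparse-matrix view:

import networkx as nx
from networkx.utils import reverse_cuthill_mckee_ordering

G = nx.gnm_random_graph(20, 40, seed=1)
rcm = list(reverse_cuthill_mckee_ordering(G))

# Build the adjacency matrix with rows/columns in RCM order.
A = nx.to_scipy_sparse_array(G, nodelist=rcm)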
@@ -0,0 +1,55 @@
import networkx as nx


def test_unionfind():
    # Fixed by: 2cddd5958689bdecdcd89b91ac9aaf6ce0e4f6b8
    # Previously (in 2.x), the UnionFind class could handle mixed types.
    # But in Python 3.x, this causes a TypeError such as:
    # TypeError: unorderable types: str() > int()
    #
    # Now we just make sure that no exception is raised.
    x = nx.utils.UnionFind()
    x.union(0, "a")


def test_subtree_union():
    # See https://github.com/networkx/networkx/pull/3224
    # (35db1b551ee65780794a357794f521d8768d5049).
    # Test if subtree unions are handled correctly by to_sets().
    uf = nx.utils.UnionFind()
    uf.union(1, 2)
    uf.union(3, 4)
    uf.union(4, 5)
    uf.union(1, 5)
    assert list(uf.to_sets()) == [{1, 2, 3, 4, 5}]


def test_unionfind_weights():
    # Tests if weights are computed correctly with unions of many elements
    uf = nx.utils.UnionFind()
    uf.union(1, 4, 7)
    uf.union(2, 5, 8)
    uf.union(3, 6, 9)
    uf.union(1, 2, 3, 4, 5, 6, 7, 8, 9)
    assert uf.weights[uf[1]] == 9


def test_unbalanced_merge_weights():
    # Tests if the largest set's root is used as the new root when merging
    uf = nx.utils.UnionFind()
    uf.union(1, 2, 3)
    uf.union(4, 5, 6, 7, 8, 9)
    assert uf.weights[uf[1]] == 3
    assert uf.weights[uf[4]] == 6
    largest_root = uf[4]
    uf.union(1, 4)
    assert uf[1] == largest_root
    assert uf.weights[largest_root] == 9


def test_empty_union():
    # Tests if a null-union does nothing.
    uf = nx.utils.UnionFind((0, 1))
    uf.union()
    assert uf[0] == 0
    assert uf[1] == 1
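
The pattern these tests exercise is the classic disjoint-set use case: incremental connectivity with cycle detection, as in Kruskal's algorithm. A minimal sketch using only the `uf[x]` and `uf.union(...)` operations shown above:

import networkx as nx

uf = nx.utils.UnionFind()
tree_edges = []
for u, v in [(1, 2), (2, 3), (1, 3), (4, 5)]:
    if uf[u] != uf[v]:  # different roots: joining them creates no cycle
        uf.union(u, v)
        tree_edges.append((u, v))

assert tree_edges == [(1, 2), (2, 3), (4, 5)]  # (1, 3) would close a cycle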
106
venv/lib/python3.13/site-packages/networkx/utils/union_find.py
Normal file
@@ -0,0 +1,106 @@
"""
|
||||
Union-find data structure.
|
||||
"""
|
||||
|
||||
from networkx.utils import groups
|
||||
|
||||
|
||||
class UnionFind:
|
||||
"""Union-find data structure.
|
||||
|
||||
Each unionFind instance X maintains a family of disjoint sets of
|
||||
hashable objects, supporting the following two methods:
|
||||
|
||||
- X[item] returns a name for the set containing the given item.
|
||||
Each set is named by an arbitrarily-chosen one of its members; as
|
||||
long as the set remains unchanged it will keep the same name. If
|
||||
the item is not yet part of a set in X, a new singleton set is
|
||||
created for it.
|
||||
|
||||
- X.union(item1, item2, ...) merges the sets containing each item
|
||||
into a single larger set. If any item is not yet part of a set
|
||||
in X, it is added to X as one of the members of the merged set.
|
||||
|
||||
Union-find data structure. Based on Josiah Carlson's code,
|
||||
https://code.activestate.com/recipes/215912/
|
||||
with significant additional changes by D. Eppstein.
|
||||
http://www.ics.uci.edu/~eppstein/PADS/UnionFind.py
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, elements=None):
|
||||
"""Create a new empty union-find structure.
|
||||
|
||||
If *elements* is an iterable, this structure will be initialized
|
||||
with the discrete partition on the given set of elements.
|
||||
|
||||
"""
|
||||
if elements is None:
|
||||
elements = ()
|
||||
self.parents = {}
|
||||
self.weights = {}
|
||||
for x in elements:
|
||||
self.weights[x] = 1
|
||||
self.parents[x] = x
|
||||
|
||||
def __getitem__(self, object):
|
||||
"""Find and return the name of the set containing the object."""
|
||||
|
||||
# check for previously unknown object
|
||||
if object not in self.parents:
|
||||
self.parents[object] = object
|
||||
self.weights[object] = 1
|
||||
return object
|
||||
|
||||
# find path of objects leading to the root
|
||||
path = []
|
||||
root = self.parents[object]
|
||||
while root != object:
|
||||
path.append(object)
|
||||
object = root
|
||||
root = self.parents[object]
|
||||
|
||||
# compress the path and return
|
||||
for ancestor in path:
|
||||
self.parents[ancestor] = root
|
||||
return root
|
||||
|
||||
def __iter__(self):
|
||||
"""Iterate through all items ever found or unioned by this structure."""
|
||||
return iter(self.parents)
|
||||
|
||||
def to_sets(self):
|
||||
"""Iterates over the sets stored in this structure.
|
||||
|
||||
For example::
|
||||
|
||||
>>> partition = UnionFind("xyz")
|
||||
>>> sorted(map(sorted, partition.to_sets()))
|
||||
[['x'], ['y'], ['z']]
|
||||
>>> partition.union("x", "y")
|
||||
>>> sorted(map(sorted, partition.to_sets()))
|
||||
[['x', 'y'], ['z']]
|
||||
|
||||
"""
|
||||
# Ensure fully pruned paths
|
||||
for x in self.parents:
|
||||
_ = self[x] # Evaluated for side-effect only
|
||||
|
||||
yield from groups(self.parents).values()
|
||||
|
||||
def union(self, *objects):
|
||||
"""Find the sets containing the objects and merge them all."""
|
||||
# Find the heaviest root according to its weight.
|
||||
roots = iter(
|
||||
sorted(
|
||||
{self[x] for x in objects}, key=lambda r: self.weights[r], reverse=True
|
||||
)
|
||||
)
|
||||
try:
|
||||
root = next(roots)
|
||||
except StopIteration:
|
||||
return
|
||||
|
||||
for r in roots:
|
||||
self.weights[root] += self.weights[r]
|
||||
self.parents[r] = root
|
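
As a usage note, iterating the structure with `to_sets()` yields the current partition, which makes UnionFind a compact connected-components accumulator. A small sketch using only the API defined above:

from networkx.utils.union_find import UnionFind

# Start from the discrete partition on six elements, then merge.
uf = UnionFind("abcdef")
for x, y in [("a", "b"), ("b", "c"), ("e", "f")]:
    uf.union(x, y)

components = sorted(map(sorted, uf.to_sets()))
assert components == [["a", "b", "c"], ["d"], ["e", "f"]]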