replace black, isort, pylint with ruff #1069

Merged
merged 6 commits on May 13, 2024
2 changes: 1 addition & 1 deletion .github/workflows/build.yml
@@ -68,7 +68,7 @@ jobs:
- name: Install dependencies
run: poetry install
- name: Run Format Check
run: poetry run poe format_check
run: poetry run poe static_checks
test-coverage:
runs-on: ubuntu-latest
strategy:
9 changes: 5 additions & 4 deletions docs/conf.py
@@ -15,6 +15,11 @@
import os
import sys

# Use RTD Theme
import sphinx_rtd_theme

import graspologic

sys.path.append(os.path.abspath("./sphinx-ext/"))
sys.path.insert(0, os.path.abspath(".."))

@@ -28,7 +33,6 @@
dir_realpath = os.path.dirname(realpath)
sys.path.append(dir_realpath)

import graspologic

version = graspologic.__version__
# Append "dev" and the github run to the version when on the dev branch
@@ -117,9 +121,6 @@
pygments_style = "sphinx"
smartquotes = False

# Use RTD Theme
import sphinx_rtd_theme

html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
9 changes: 5 additions & 4 deletions graspologic/cluster/autogmm.py
@@ -574,7 +574,7 @@ def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> "AutoGMMCluster"
agg = AgglomerativeClustering(
n_clusters=self.min_components,
metric=affinity,
**p_ag_without_affinity
**p_ag_without_affinity,
)
agg.fit(X_subset)
hierarchical_labels = _hierarchical_labels(
@@ -762,9 +762,10 @@ def _hierarchical_labels(
inds = np.where(np.isin(hierarchical_labels[:, n], children[n, :]))[0]
hierarchical_labels[inds, -1] = n_samples + n
if n < merge_end:
hierarchical_labels = np.hstack(
(hierarchical_labels, hierarchical_labels[:, -1].reshape((-1, 1)))
)
hierarchical_labels = np.hstack((
hierarchical_labels,
hierarchical_labels[:, -1].reshape((-1, 1)),
))

hierarchical_labels = hierarchical_labels[:, merge_start:]
for i in range(hierarchical_labels.shape[1]):
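
As context for the trailing comma added to the AgglomerativeClustering(...) call above: ruff format, like black, treats a trailing comma before a closing bracket as a request to keep the call exploded across lines, and without one it may collapse the call onto a single line when it fits. A minimal sketch of that convention, using a toy function rather than graspologic code:

# Toy illustration of the "magic trailing comma" convention described above.
def make_cluster(min_components: int, max_components: int, **kwargs: object) -> dict:
    """Stand-in for a clustering constructor."""
    return {"min": min_components, "max": max_components, **kwargs}

# Trailing comma after the last keyword argument: the multi-line layout is kept.
expanded = make_cluster(
    min_components=2,
    max_components=10,
    covariance_type="full",
)

# No trailing comma: the formatter is free to join the call onto one line.
collapsed = make_cluster(min_components=2, max_components=10)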
9 changes: 5 additions & 4 deletions graspologic/cluster/divisive_cluster.py
@@ -214,7 +214,7 @@ def _cluster_and_decide(self, X: np.ndarray) -> np.ndarray:
cluster = AutoGMMCluster(
min_components=min_components,
max_components=self.max_components,
**self.cluster_kws
**self.cluster_kws,
)
cluster.fit(X)
model = cluster.model_
@@ -275,9 +275,10 @@ def _fit(self, X: np.ndarray) -> np.ndarray:
):
child_labels = dc._fit(new_X)
while labels.shape[1] <= child_labels.shape[1]:
labels = np.column_stack(
(labels, np.zeros((len(X), 1), dtype=int))
)
labels = np.column_stack((
labels,
np.zeros((len(X), 1), dtype=int),
))
labels[inds, 1 : child_labels.shape[1] + 1] = child_labels
else:
# make a "GaussianMixture" model for clusters
4 changes: 1 addition & 3 deletions graspologic/cluster/kclust.py
@@ -88,9 +88,7 @@ def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> "KMeansCluster":
# Deal with number of clusters
if self.max_clusters > X.shape[0]:
msg = "n_components must be >= n_samples, but got \
n_components = {}, n_samples = {}".format(
self.max_clusters, X.shape[0]
)
n_components = {}, n_samples = {}".format(self.max_clusters, X.shape[0])
raise ValueError(msg)
else:
max_clusters = self.max_clusters
2 changes: 1 addition & 1 deletion graspologic/embed/ase.py
@@ -142,7 +142,7 @@ def fit(
graph: GraphRepresentation,
y: Optional[Any] = None,
*args: Any,
**kwargs: Any
**kwargs: Any,
) -> "AdjacencySpectralEmbed":
"""
Fit ASE model to input graph
8 changes: 4 additions & 4 deletions graspologic/embed/base.py
@@ -144,7 +144,7 @@ def fit(
graph: GraphRepresentation,
y: Optional[Any] = None,
*args: Any,
**kwargs: Any
**kwargs: Any,
) -> "BaseSpectralEmbed":
"""
A method for embedding.
@@ -221,7 +221,7 @@ def fit_transform(
graph: GraphRepresentation,
y: Optional[Any] = None,
*args: Any,
**kwargs: Any
**kwargs: Any,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""
Fit the model with graphs and apply the transformation.
@@ -312,8 +312,8 @@ def transform(self, X): # type: ignore
# correct types?
if directed and not isinstance(X, tuple):
if X.shape[0] == X.shape[1]: # in case original matrix was passed
msg = """A square matrix A was passed to ``transform`` in the directed case.
If this was the original in-sample matrix, either use ``fit_transform``
msg = """A square matrix A was passed to ``transform`` in the directed case.
If this was the original in-sample matrix, either use ``fit_transform``
or pass a tuple (A.T, A). If this was an out-of-sample matrix, directed
graphs require a tuple (X_out, X_in)."""
raise TypeError(msg)
2 changes: 1 addition & 1 deletion graspologic/embed/lse.py
@@ -148,7 +148,7 @@ def fit(
graph: GraphRepresentation,
y: Optional[Any] = None,
*args: Any,
**kwargs: Any
**kwargs: Any,
) -> "LaplacianSpectralEmbed":
"""
Fit LSE model to input graph
20 changes: 8 additions & 12 deletions graspologic/embed/mase.py
@@ -172,18 +172,14 @@ def _reduce_dim(self, graphs): # type: ignore
Vs = np.hstack([V.T[:, :best_dimension] for V in Vs])
else:
# Equivalent to ASE
Us = np.hstack(
[
U[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension]))
for U, D in zip(Us, Ds)
]
)
Vs = np.hstack(
[
V.T[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension]))
for V, D in zip(Vs, Ds)
]
)
Us = np.hstack([
U[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension]))
for U, D in zip(Us, Ds)
])
Vs = np.hstack([
V.T[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension]))
for V, D in zip(Vs, Ds)
])

# Second SVD for vertices
# The notation is slightly different than the paper
7 changes: 4 additions & 3 deletions graspologic/embed/mds.py
@@ -1,18 +1,19 @@
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.

from typing import Any, Optional, Union
from typing import TYPE_CHECKING, Any, Optional, Union

import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_array
from typing_extensions import Literal

from graspologic.types import Tuple

from ..utils import is_symmetric
from .svd import SvdAlgorithmType, select_svd

if TYPE_CHECKING:
from graspologic.types import Tuple


def _get_centering_matrix(n: int) -> np.ndarray:
"""
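
The mds.py hunk above moves the Tuple import under a TYPE_CHECKING guard, so it is only imported while type checking and never at runtime. A minimal sketch of the same pattern (illustrative code, not the actual module):

# The guarded import is only executed by static type checkers.
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from graspologic.types import Tuple  # never imported at runtime


def endpoints(values: list[float]) -> Tuple[float, float]:
    """Return the first and last element of a non-empty sequence."""
    return values[0], values[-1]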
4 changes: 2 additions & 2 deletions graspologic/embed/omni.py
@@ -56,7 +56,7 @@ def _get_omnibus_matrix_sparse(matrices: List[csr_array]) -> csr_array:


def _get_laplacian_matrices(
graphs: Union[np.ndarray, List[GraphRepresentation]]
graphs: Union[np.ndarray, List[GraphRepresentation]],
) -> Union[np.ndarray, List[np.ndarray]]:
"""
Helper function to convert graph adjacency matrices to graph Laplacian
@@ -82,7 +82,7 @@ def _get_laplacian_matrices(


def _get_omni_matrix(
graphs: Union[AdjacencyMatrix, List[AdjacencyMatrix]]
graphs: Union[AdjacencyMatrix, List[AdjacencyMatrix]],
) -> np.ndarray:
"""
Helper function for creating the omnibus matrix.
2 changes: 1 addition & 1 deletion graspologic/embed/svd.py
@@ -44,7 +44,7 @@ def _compute_likelihood(arr: np.ndarray) -> np.ndarray:
mu2 = -np.inf

# compute pooled variance
variance = ((np.sum((s1 - mu1) ** 2) + np.sum((s2 - mu2) ** 2))) / (
variance = (np.sum((s1 - mu1) ** 2) + np.sum((s2 - mu2) ** 2)) / (
n_elements - 1 - (idx < n_elements)
)
std = np.sqrt(variance)
2 changes: 1 addition & 1 deletion graspologic/inference/binomial.py
@@ -79,6 +79,6 @@ def binom_2samp(
value=null_ratio,
)
else:
raise ValueError()
raise ValueError

return BinomialResult(stat, pvalue)
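
The change from raise ValueError() to raise ValueError here (and in group_connection_test.py below) drops redundant parentheses: raising an exception class raises a fresh instance of it, so the empty call adds nothing. A small sketch of the equivalence (attributing this change to a specific lint rule, such as ruff's RSE102, is an assumption):

def check_ratio(ratio: float) -> float:
    if not 0.0 <= ratio <= 1.0:
        raise ValueError  # identical in effect to `raise ValueError()`
    return ratio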
10 changes: 5 additions & 5 deletions graspologic/inference/group_connection_test.py
@@ -378,18 +378,18 @@ def group_connection_test(
B1, n_observed1, n_possible1, group_counts1 = fit_sbm(A1, labels1)
B2, n_observed2, n_possible2, group_counts2 = fit_sbm(A2, labels2)
if not n_observed1.index.equals(n_observed2.index):
raise ValueError()
raise ValueError
elif not n_observed1.columns.equals(n_observed2.columns):
raise ValueError()
raise ValueError
elif not n_possible1.index.equals(n_possible2.index):
raise ValueError()
raise ValueError
elif not n_observed1.columns.equals(n_observed2.columns):
raise ValueError()
raise ValueError

index = n_observed1.index.copy()

if n_observed1.shape[0] != n_observed2.shape[0]:
raise ValueError()
raise ValueError

K = n_observed1.shape[0]

6 changes: 3 additions & 3 deletions graspologic/inference/latent_position_test.py
@@ -124,9 +124,9 @@ def latent_position_test(
if type(embedding) is not str:
raise TypeError("embedding must be str")
if type(n_bootstraps) is not int:
raise TypeError()
raise TypeError
if type(test_case) is not str:
raise TypeError()
raise TypeError
if n_bootstraps < 1:
raise ValueError(
"{} is invalid number of bootstraps, must be greater than 1".format(
@@ -148,7 +148,7 @@
A1 = import_graph(A1)
A2 = import_graph(A2)
if not is_symmetric(A1) or not is_symmetric(A2):
raise NotImplementedError() # TODO asymmetric case
raise NotImplementedError # TODO asymmetric case
if A1.shape != A2.shape:
raise ValueError("Input matrices do not have matching dimensions")
num_components: int
2 changes: 1 addition & 1 deletion graspologic/layouts/nooverlap/_grid.py
@@ -180,7 +180,7 @@ def get_all_grid_cells(self) -> List[List[int]]:

def print_stats(self) -> None:
print(
f"cell size: {self.cell_size}, area: {self.cell_size*self.cell_size}, "
f"cell size: {self.cell_size}, area: {self.cell_size * self.cell_size}, "
f"rows: {self._get_y_cells()}, cols: {self._get_x_cells()}"
)

2 changes: 1 addition & 1 deletion graspologic/layouts/nooverlap/nooverlap.py
@@ -23,7 +23,7 @@ def remove_overlaps(node_positions: List[NodePosition]) -> List[NodePosition]:
qt = _QuadTree(local_nodes, 50)
qt.layout_dense_first(first_color=None)
stop = time.time()
logger.info(f"removed overlap in {stop-start} seconds")
logger.info(f"removed overlap in {stop - start} seconds")

new_positions = [
NodePosition(
7 changes: 4 additions & 3 deletions graspologic/match/solver.py
@@ -430,9 +430,10 @@ def finalize(self, P: np.ndarray, rng: np.random.Generator) -> None:
permutation = np.array([], dtype=int)

# deal with seed-nonseed sorting from the initialization
permutation = np.concatenate(
(np.arange(self.n_seeds), permutation + self.n_seeds)
)
permutation = np.concatenate((
np.arange(self.n_seeds),
permutation + self.n_seeds,
))
final_permutation = np.empty(self.n, dtype=int)
final_permutation[self.perm_A] = self.perm_B[permutation]

2 changes: 1 addition & 1 deletion graspologic/match/wrappers.py
@@ -30,7 +30,7 @@ class MatchResult(NamedTuple):
indices_B: np.ndarray
"""
Indices in ``B`` which were matched. Element ``indices_B[i]`` was matched
to element ``indices_A[i]``. ``indices_B`` can also be thought of as a
to element ``indices_A[i]``. ``indices_B`` can also be thought of as a
permutation of the nodes of ``B`` with respect to ``A``.
"""

4 changes: 2 additions & 2 deletions graspologic/models/sbm_estimators.py
@@ -166,7 +166,7 @@ def _estimate_assignments(self, graph: GraphRepresentation) -> None:
gc = GaussianCluster(
min_components=self.min_comm,
max_components=self.max_comm,
**self.cluster_kws
**self.cluster_kws,
)
vertex_assignments = gc.fit_predict(latent) # type: ignore
self.vertex_assignments_ = vertex_assignments
@@ -374,7 +374,7 @@ def _estimate_assignments(self, graph: GraphRepresentation) -> None:
gc = GaussianCluster(
min_components=self.min_comm,
max_components=self.max_comm,
**self.cluster_kws
**self.cluster_kws,
)
self.vertex_assignments_ = gc.fit_predict(latent) # type: ignore

4 changes: 1 addition & 3 deletions graspologic/nominate/VNviaSGM.py
@@ -259,9 +259,7 @@ def fit(
if len(close_seeds) <= 0:
warnings.warn(
'Voi {} was not a member of the induced subgraph A[{}], \
Try increasing "order_voi_subgraph"'.format(
voi, seedsA
)
Try increasing "order_voi_subgraph"'.format(voi, seedsA)
)
self.n_seeds_ = None
self.nomination_list_ = None
4 changes: 2 additions & 2 deletions graspologic/partition/leiden.py
@@ -352,8 +352,8 @@ class HierarchicalCluster(NamedTuple):
"""Only used when level != 0, but will indicate the previous cluster id that this node was in"""
level: int
"""
Each time a community has a higher population than we would like, we create a subnetwork
of that community and process it again to break it into smaller chunks. Each time we
Each time a community has a higher population than we would like, we create a subnetwork
of that community and process it again to break it into smaller chunks. Each time we
detect this, the level increases by 1
"""
is_final_cluster: bool
4 changes: 2 additions & 2 deletions graspologic/pipeline/embed/__init__.py
@@ -1,6 +1,6 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

# ruff: noqa: E402 SVD_SOLVER_TYPES needs to be first
"""
The embed module of ``graspologic.pipeline.embed`` is intended to provide faster
application development support. The functions provided in it reflect common call
@@ -9,8 +9,8 @@
"""

__SVD_SOLVER_TYPES = ["randomized", "full", "truncated"]

from .adjacency_spectral_embedding import adjacency_spectral_embedding
from .embeddings import Embeddings, EmbeddingsView
from .laplacian_spectral_embedding import laplacian_spectral_embedding
from .omnibus_embedding import omnibus_embedding_pairwise
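
The # ruff: noqa: E402 directive added near the top of this __init__.py suppresses E402 (module level import not at top of file) for the whole module, since __SVD_SOLVER_TYPES is deliberately defined before the imports. A stripped-down sketch of the same situation, using a hypothetical module rather than the real one:

# ruff: noqa: E402
# A constant that later imports conceptually depend on is defined first,
# so the imports below it would otherwise be flagged as E402.
__SUPPORTED_SOLVERS = ["randomized", "full", "truncated"]

from typing import List  # would trigger E402 without the file-level noqa


def solver_names() -> List[str]:
    """Return a copy of the supported solver names."""
    return list(__SUPPORTED_SOLVERS)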

2 changes: 1 addition & 1 deletion graspologic/pipeline/embed/_elbow.py
@@ -38,7 +38,7 @@ def _compute_likelihood(arr: np.ndarray) -> np.ndarray:
mu2 = -np.inf

# compute pooled variance
variance = ((np.sum((s1 - mu1) ** 2) + np.sum((s2 - mu2) ** 2))) / (
variance = (np.sum((s1 - mu1) ** 2) + np.sum((s2 - mu2) ** 2)) / (
n_elements - 1 - (idx < n_elements)
)
std = np.sqrt(variance)
10 changes: 4 additions & 6 deletions graspologic/pipeline/embed/omnibus_embedding.py
@@ -220,12 +220,10 @@ def omnibus_embedding_pairwise(
elbow_cut, graph.is_directed(), model.singular_values_, current_embedding
)

graph_embeddings.append(
(
Embeddings(union_node_ids, previous_embedding_cut),
Embeddings(union_node_ids, current_embedding_cut),
)
)
graph_embeddings.append((
Embeddings(union_node_ids, previous_embedding_cut),
Embeddings(union_node_ids, current_embedding_cut),
))

return graph_embeddings
