Commit
Configure isort to use the Black profile, recursively isort the `thinc` module (#880)

* Use isort with the Black profile

* isort the thinc module

* Fix import cycles as a result of import sorting

* Add isort to requirements
danieldk committed Jun 14, 2023
1 parent e581158 commit febb00f
Showing 150 changed files with 1,014 additions and 612 deletions.
3 changes: 3 additions & 0 deletions pyproject.toml
@@ -9,3 +9,6 @@ requires = [
"numpy>=1.15.0",
]
build-backend = "setuptools.build_meta"

[tool.isort]
profile = "black"
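
With this setting in place, a recursive run such as `isort thinc` (or `python -m isort thinc`) picks the profile up from pyproject.toml automatically. For illustration only: per isort's documented profiles, `black` is shorthand for roughly the following Black-compatible settings, so the single profile line above is all the repository needs:

[tool.isort]
multi_line_output = 3                  # vertical hanging indent
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true
line_length = 88                       # Black's default line length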
1 change: 1 addition & 0 deletions requirements.txt
@@ -36,3 +36,4 @@ nbformat>=5.0.4,<5.2.0
# Test to_disk/from_disk against pathlib.Path subclasses
pathy>=0.3.5
black>=22.0,<23.0
isort>=5.0,<6.0
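
isort is pinned next to black in the development requirements; the >=5.0 floor matters here because profile support, including the `black` profile configured above, was introduced in isort 5.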
1 change: 0 additions & 1 deletion thinc/__init__.py
@@ -4,7 +4,6 @@
from .about import __version__
from .config import registry


# fmt: off
__all__ = [
"registry",
207 changes: 159 additions & 48 deletions thinc/api.py
@@ -1,52 +1,163 @@
from .config import Config, registry, ConfigValidationError
from .initializers import normal_init, uniform_init, glorot_uniform_init, zero_init
from .initializers import configure_normal_init
from .loss import CategoricalCrossentropy, L2Distance, CosineDistance
from .loss import SequenceCategoricalCrossentropy
from .model import Model, serialize_attr, deserialize_attr
from .model import set_dropout_rate, change_attr_values, wrap_model_recursive
from .shims import Shim, PyTorchGradScaler, PyTorchShim, TensorFlowShim, keras_model_fns
from .shims import MXNetShim, TorchScriptShim, maybe_handshake_model
from .optimizers import Adam, RAdam, SGD, Optimizer
from .schedules import cyclic_triangular, warmup_linear, constant, constant_then
from .schedules import decaying, slanted_triangular, compounding
from .types import Ragged, Padded, ArgsKwargs, Unserializable
from .util import fix_random_seed, is_cupy_array, set_active_gpu
from .util import prefer_gpu, require_gpu, require_cpu
from .util import DataValidationError, data_validation
from .util import to_categorical, get_width, get_array_module, to_numpy
from .util import torch2xp, xp2torch, tensorflow2xp, xp2tensorflow, mxnet2xp, xp2mxnet
from .util import get_torch_default_device
from .backends import (
CupyOps,
MPSOps,
NumpyOps,
Ops,
get_current_ops,
get_ops,
set_current_ops,
set_gpu_allocator,
use_ops,
use_pytorch_for_gpu_memory,
use_tensorflow_for_gpu_memory,
)
from .compat import has_cupy
from .backends import get_ops, set_current_ops, get_current_ops, use_ops
from .backends import Ops, CupyOps, MPSOps, NumpyOps, set_gpu_allocator
from .backends import use_pytorch_for_gpu_memory, use_tensorflow_for_gpu_memory

from .layers import Dropout, Embed, expand_window, HashEmbed, LayerNorm, Linear
from .layers import Maxout, Mish, MultiSoftmax, Relu, softmax_activation, Softmax, LSTM
from .layers import CauchySimilarity, ParametricAttention, Logistic
from .layers import resizable, sigmoid_activation, Sigmoid, SparseLinear
from .layers import SparseLinear_v2, ClippedLinear, ReluK, HardTanh, HardSigmoid
from .layers import Dish, HardSwish, HardSwishMobilenet, Swish, Gelu
from .layers import PyTorchWrapper, PyTorchRNNWrapper, PyTorchLSTM
from .layers import TensorFlowWrapper, keras_subclass, MXNetWrapper
from .layers import PyTorchWrapper_v2, Softmax_v2, PyTorchWrapper_v3
from .layers import TorchScriptWrapper_v1, pytorch_to_torchscript_wrapper

from .layers import add, bidirectional, chain, clone, concatenate, noop
from .layers import residual, uniqued, siamese, list2ragged, ragged2list
from .layers import map_list
from .layers import with_array, with_array2d
from .layers import with_padded, with_list, with_ragged, with_flatten
from .layers import with_reshape, with_getitem, strings2arrays, list2array
from .layers import list2ragged, ragged2list, list2padded, padded2list
from .layers import remap_ids, remap_ids_v2, premap_ids
from .layers import array_getitem, with_cpu, with_debug, with_nvtx_range
from .layers import with_signpost_interval
from .layers import tuplify, with_flatten_v2

from .layers import reduce_first, reduce_last, reduce_max, reduce_mean, reduce_sum

from .config import Config, ConfigValidationError, registry
from .initializers import (
configure_normal_init,
glorot_uniform_init,
normal_init,
uniform_init,
zero_init,
)
from .layers import (
LSTM,
CauchySimilarity,
ClippedLinear,
Dish,
Dropout,
Embed,
Gelu,
HardSigmoid,
HardSwish,
HardSwishMobilenet,
HardTanh,
HashEmbed,
LayerNorm,
Linear,
Logistic,
Maxout,
Mish,
MultiSoftmax,
MXNetWrapper,
ParametricAttention,
PyTorchLSTM,
PyTorchRNNWrapper,
PyTorchWrapper,
PyTorchWrapper_v2,
PyTorchWrapper_v3,
Relu,
ReluK,
Sigmoid,
Softmax,
Softmax_v2,
SparseLinear,
SparseLinear_v2,
Swish,
TensorFlowWrapper,
TorchScriptWrapper_v1,
add,
array_getitem,
bidirectional,
chain,
clone,
concatenate,
expand_window,
keras_subclass,
list2array,
list2padded,
list2ragged,
map_list,
noop,
padded2list,
premap_ids,
pytorch_to_torchscript_wrapper,
ragged2list,
reduce_first,
reduce_last,
reduce_max,
reduce_mean,
reduce_sum,
remap_ids,
remap_ids_v2,
residual,
resizable,
siamese,
sigmoid_activation,
softmax_activation,
strings2arrays,
tuplify,
uniqued,
with_array,
with_array2d,
with_cpu,
with_debug,
with_flatten,
with_flatten_v2,
with_getitem,
with_list,
with_nvtx_range,
with_padded,
with_ragged,
with_reshape,
with_signpost_interval,
)
from .loss import (
CategoricalCrossentropy,
CosineDistance,
L2Distance,
SequenceCategoricalCrossentropy,
)
from .model import (
Model,
change_attr_values,
deserialize_attr,
serialize_attr,
set_dropout_rate,
wrap_model_recursive,
)
from .optimizers import SGD, Adam, Optimizer, RAdam
from .schedules import (
compounding,
constant,
constant_then,
cyclic_triangular,
decaying,
slanted_triangular,
warmup_linear,
)
from .shims import (
MXNetShim,
PyTorchGradScaler,
PyTorchShim,
Shim,
TensorFlowShim,
TorchScriptShim,
keras_model_fns,
maybe_handshake_model,
)
from .types import ArgsKwargs, Padded, Ragged, Unserializable
from .util import (
DataValidationError,
data_validation,
fix_random_seed,
get_array_module,
get_torch_default_device,
get_width,
is_cupy_array,
mxnet2xp,
prefer_gpu,
require_cpu,
require_gpu,
set_active_gpu,
tensorflow2xp,
to_categorical,
to_numpy,
torch2xp,
xp2mxnet,
xp2tensorflow,
xp2torch,
)

# fmt: off
__all__ = [
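The rewritten thinc/api.py imports above show the Black profile's "vertical hanging indent" style: when a from-import no longer fits on one line, each name goes on its own line inside parentheses, with a trailing comma on the last entry. A minimal sketch with standard-library names (isort only wraps like this once the one-line form would exceed the 88-character limit; assume it does here):

# Before: names packed onto one line, plus a duplicate import to merge
from collections import OrderedDict, defaultdict
from collections import deque

# After isort with the Black profile: one parenthesized block,
# one name per line, trailing comma included
from collections import (
    OrderedDict,
    defaultdict,
    deque,
)

Note the ordering inside each block as well: isort's default order_by_type setting sorts all-uppercase names first (hence LSTM leading the thinc.layers block above), then classes, then lowercase functions.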
27 changes: 15 additions & 12 deletions thinc/backends/__init__.py
@@ -1,20 +1,23 @@
import contextlib
from typing import Type, Dict, Any, Callable, Optional, cast

from contextvars import ContextVar
import threading
from contextvars import ContextVar
from typing import Any, Callable, Dict, Optional, Type, cast

from .ops import Ops
from .cupy_ops import CupyOps
from .numpy_ops import NumpyOps
from .mps_ops import MPSOps
from ._cupy_allocators import cupy_tensorflow_allocator, cupy_pytorch_allocator
from ._param_server import ParamServer
from ..util import assert_tensorflow_installed, assert_pytorch_installed
from ..util import get_torch_default_device, is_cupy_array, require_cpu
from .. import registry
from ..compat import cupy, has_cupy

from ..util import (
assert_pytorch_installed,
assert_tensorflow_installed,
get_torch_default_device,
is_cupy_array,
require_cpu,
)
from ._cupy_allocators import cupy_pytorch_allocator, cupy_tensorflow_allocator
from ._param_server import ParamServer
from .cupy_ops import CupyOps
from .mps_ops import MPSOps
from .numpy_ops import NumpyOps
from .ops import Ops

context_ops: ContextVar[Optional[Ops]] = ContextVar("context_ops", default=None)
context_pools: ContextVar[dict] = ContextVar("context_pools", default={})
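The new ordering in thinc/backends/__init__.py also reflects isort's default section grouping: standard-library imports first, then third-party, then first-party and relative imports, each section alphabetized and separated by a blank line, with plain import statements ahead of from-imports. A small sketch of the three sections (assuming numpy and thinc are installed):

import threading                      # 1) standard library
from contextvars import ContextVar

import numpy                          # 2) third-party

from thinc.backends.ops import Ops    # 3) first-party / local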
2 changes: 1 addition & 1 deletion thinc/backends/_cupy_allocators.py
@@ -1,8 +1,8 @@
from typing import cast

from ..compat import cupy, tensorflow, torch
from ..types import ArrayXd
from ..util import get_torch_default_device, tensorflow2xp
from ..compat import torch, cupy, tensorflow


def cupy_tensorflow_allocator(size_in_bytes: int):
11 changes: 6 additions & 5 deletions thinc/backends/_custom_kernels.py
@@ -1,12 +1,13 @@
from typing import Callable, Optional, Tuple
from functools import reduce
import numpy
import operator
import re
from pathlib import Path
from collections import defaultdict
from ..compat import cupy, has_cupy_gpu
from functools import reduce
from pathlib import Path
from typing import Callable, Optional, Tuple

import numpy

from ..compat import cupy, has_cupy_gpu

PWD = Path(__file__).parent
KERNELS_SRC = (PWD / "_custom_kernels.cu").read_text(encoding="utf8")
3 changes: 1 addition & 2 deletions thinc/backends/_param_server.py
@@ -1,9 +1,8 @@
from typing import Dict, Tuple, Optional, Any
from typing import Any, Dict, Optional, Tuple

from ..types import FloatsXd
from ..util import get_array_module


KeyT = Tuple[int, str]


1 change: 0 additions & 1 deletion thinc/backends/cblas.pxd
@@ -1,6 +1,5 @@
from libcpp.memory cimport shared_ptr


ctypedef void (*sgemm_ptr)(bint transA, bint transB, int M, int N, int K,
float alpha, const float* A, int lda, const float *B,
int ldb, float beta, float* C, int ldc) nogil
21 changes: 14 additions & 7 deletions thinc/backends/cupy_ops.py
@@ -1,13 +1,20 @@
import numpy

from .. import registry
from .ops import Ops
from .numpy_ops import NumpyOps
from . import _custom_kernels
from ..types import DeviceTypes
from ..util import torch2xp, tensorflow2xp, mxnet2xp
from ..util import is_cupy_array
from ..util import is_torch_cuda_array, is_tensorflow_gpu_array, is_mxnet_gpu_array
from ..compat import cupy, cupyx
from ..types import DeviceTypes
from ..util import (
is_cupy_array,
is_mxnet_gpu_array,
is_tensorflow_gpu_array,
is_torch_cuda_array,
mxnet2xp,
tensorflow2xp,
torch2xp,
)
from . import _custom_kernels
from .numpy_ops import NumpyOps
from .ops import Ops


@registry.ops("CupyOps")
5 changes: 2 additions & 3 deletions thinc/backends/linalg.pxd
@@ -2,10 +2,9 @@
# cython: cdivision=True

cimport cython
from libc.stdint cimport int32_t
from libc.string cimport memset, memcpy
from cymem.cymem cimport Pool

from libc.stdint cimport int32_t
from libc.string cimport memcpy, memset

ctypedef float weight_t

4 changes: 3 additions & 1 deletion thinc/backends/mps_ops.py
@@ -1,8 +1,10 @@
from typing import TYPE_CHECKING

import numpy

from .. import registry
from . import NumpyOps, Ops
from .numpy_ops import NumpyOps
from .ops import Ops

if TYPE_CHECKING:
# Type checking does not work with dynamic base classes, since MyPy cannot
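The thinc/backends/mps_ops.py change above also explains the "fix import cycles" bullet in the commit message: a module that pulls names from its own package __init__ (from . import NumpyOps, Ops) re-enters thinc.backends while __init__.py is still executing, and reordering imports can turn that into a hard failure. Importing straight from the defining submodules sidesteps the cycle. A schematic sketch:

# Cycle-prone: thinc/backends/__init__.py imports mps_ops, and mps_ops
# immediately imports the package again, before __init__ has finished:
#     from . import NumpyOps, Ops
#
# Cycle-free: import each name from the module that defines it:
from thinc.backends.numpy_ops import NumpyOps
from thinc.backends.ops import Ops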
