diff --git a/pyproject.toml b/pyproject.toml index d3fb69b76..1ad6782d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,3 +9,6 @@ requires = [ "numpy>=1.15.0", ] build-backend = "setuptools.build_meta" + +[tool.isort] +profile = "black" diff --git a/requirements.txt b/requirements.txt index 522291e51..6ae0c270c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -36,3 +36,4 @@ nbformat>=5.0.4,<5.2.0 # Test to_disk/from_disk against pathlib.Path subclasses pathy>=0.3.5 black>=22.0,<23.0 +isort>=5.0,<6.0 diff --git a/thinc/__init__.py b/thinc/__init__.py index dfa821c4f..8f4a8a5a5 100644 --- a/thinc/__init__.py +++ b/thinc/__init__.py @@ -4,7 +4,6 @@ from .about import __version__ from .config import registry - # fmt: off __all__ = [ "registry", diff --git a/thinc/api.py b/thinc/api.py index 203c501da..6f795237a 100644 --- a/thinc/api.py +++ b/thinc/api.py @@ -1,52 +1,163 @@ -from .config import Config, registry, ConfigValidationError -from .initializers import normal_init, uniform_init, glorot_uniform_init, zero_init -from .initializers import configure_normal_init -from .loss import CategoricalCrossentropy, L2Distance, CosineDistance -from .loss import SequenceCategoricalCrossentropy -from .model import Model, serialize_attr, deserialize_attr -from .model import set_dropout_rate, change_attr_values, wrap_model_recursive -from .shims import Shim, PyTorchGradScaler, PyTorchShim, TensorFlowShim, keras_model_fns -from .shims import MXNetShim, TorchScriptShim, maybe_handshake_model -from .optimizers import Adam, RAdam, SGD, Optimizer -from .schedules import cyclic_triangular, warmup_linear, constant, constant_then -from .schedules import decaying, slanted_triangular, compounding -from .types import Ragged, Padded, ArgsKwargs, Unserializable -from .util import fix_random_seed, is_cupy_array, set_active_gpu -from .util import prefer_gpu, require_gpu, require_cpu -from .util import DataValidationError, data_validation -from .util import to_categorical, get_width, get_array_module, to_numpy -from .util import torch2xp, xp2torch, tensorflow2xp, xp2tensorflow, mxnet2xp, xp2mxnet -from .util import get_torch_default_device +from .backends import ( + CupyOps, + MPSOps, + NumpyOps, + Ops, + get_current_ops, + get_ops, + set_current_ops, + set_gpu_allocator, + use_ops, + use_pytorch_for_gpu_memory, + use_tensorflow_for_gpu_memory, +) from .compat import has_cupy -from .backends import get_ops, set_current_ops, get_current_ops, use_ops -from .backends import Ops, CupyOps, MPSOps, NumpyOps, set_gpu_allocator -from .backends import use_pytorch_for_gpu_memory, use_tensorflow_for_gpu_memory - -from .layers import Dropout, Embed, expand_window, HashEmbed, LayerNorm, Linear -from .layers import Maxout, Mish, MultiSoftmax, Relu, softmax_activation, Softmax, LSTM -from .layers import CauchySimilarity, ParametricAttention, Logistic -from .layers import resizable, sigmoid_activation, Sigmoid, SparseLinear -from .layers import SparseLinear_v2, ClippedLinear, ReluK, HardTanh, HardSigmoid -from .layers import Dish, HardSwish, HardSwishMobilenet, Swish, Gelu -from .layers import PyTorchWrapper, PyTorchRNNWrapper, PyTorchLSTM -from .layers import TensorFlowWrapper, keras_subclass, MXNetWrapper -from .layers import PyTorchWrapper_v2, Softmax_v2, PyTorchWrapper_v3 -from .layers import TorchScriptWrapper_v1, pytorch_to_torchscript_wrapper - -from .layers import add, bidirectional, chain, clone, concatenate, noop -from .layers import residual, uniqued, siamese, list2ragged, ragged2list -from .layers import 
map_list -from .layers import with_array, with_array2d -from .layers import with_padded, with_list, with_ragged, with_flatten -from .layers import with_reshape, with_getitem, strings2arrays, list2array -from .layers import list2ragged, ragged2list, list2padded, padded2list -from .layers import remap_ids, remap_ids_v2, premap_ids -from .layers import array_getitem, with_cpu, with_debug, with_nvtx_range -from .layers import with_signpost_interval -from .layers import tuplify, with_flatten_v2 - -from .layers import reduce_first, reduce_last, reduce_max, reduce_mean, reduce_sum - +from .config import Config, ConfigValidationError, registry +from .initializers import ( + configure_normal_init, + glorot_uniform_init, + normal_init, + uniform_init, + zero_init, +) +from .layers import ( + LSTM, + CauchySimilarity, + ClippedLinear, + Dish, + Dropout, + Embed, + Gelu, + HardSigmoid, + HardSwish, + HardSwishMobilenet, + HardTanh, + HashEmbed, + LayerNorm, + Linear, + Logistic, + Maxout, + Mish, + MultiSoftmax, + MXNetWrapper, + ParametricAttention, + PyTorchLSTM, + PyTorchRNNWrapper, + PyTorchWrapper, + PyTorchWrapper_v2, + PyTorchWrapper_v3, + Relu, + ReluK, + Sigmoid, + Softmax, + Softmax_v2, + SparseLinear, + SparseLinear_v2, + Swish, + TensorFlowWrapper, + TorchScriptWrapper_v1, + add, + array_getitem, + bidirectional, + chain, + clone, + concatenate, + expand_window, + keras_subclass, + list2array, + list2padded, + list2ragged, + map_list, + noop, + padded2list, + premap_ids, + pytorch_to_torchscript_wrapper, + ragged2list, + reduce_first, + reduce_last, + reduce_max, + reduce_mean, + reduce_sum, + remap_ids, + remap_ids_v2, + residual, + resizable, + siamese, + sigmoid_activation, + softmax_activation, + strings2arrays, + tuplify, + uniqued, + with_array, + with_array2d, + with_cpu, + with_debug, + with_flatten, + with_flatten_v2, + with_getitem, + with_list, + with_nvtx_range, + with_padded, + with_ragged, + with_reshape, + with_signpost_interval, +) +from .loss import ( + CategoricalCrossentropy, + CosineDistance, + L2Distance, + SequenceCategoricalCrossentropy, +) +from .model import ( + Model, + change_attr_values, + deserialize_attr, + serialize_attr, + set_dropout_rate, + wrap_model_recursive, +) +from .optimizers import SGD, Adam, Optimizer, RAdam +from .schedules import ( + compounding, + constant, + constant_then, + cyclic_triangular, + decaying, + slanted_triangular, + warmup_linear, +) +from .shims import ( + MXNetShim, + PyTorchGradScaler, + PyTorchShim, + Shim, + TensorFlowShim, + TorchScriptShim, + keras_model_fns, + maybe_handshake_model, +) +from .types import ArgsKwargs, Padded, Ragged, Unserializable +from .util import ( + DataValidationError, + data_validation, + fix_random_seed, + get_array_module, + get_torch_default_device, + get_width, + is_cupy_array, + mxnet2xp, + prefer_gpu, + require_cpu, + require_gpu, + set_active_gpu, + tensorflow2xp, + to_categorical, + to_numpy, + torch2xp, + xp2mxnet, + xp2tensorflow, + xp2torch, +) # fmt: off __all__ = [ diff --git a/thinc/backends/__init__.py b/thinc/backends/__init__.py index c21620126..8973c8836 100644 --- a/thinc/backends/__init__.py +++ b/thinc/backends/__init__.py @@ -1,20 +1,23 @@ import contextlib -from typing import Type, Dict, Any, Callable, Optional, cast - -from contextvars import ContextVar import threading +from contextvars import ContextVar +from typing import Any, Callable, Dict, Optional, Type, cast -from .ops import Ops -from .cupy_ops import CupyOps -from .numpy_ops import NumpyOps -from .mps_ops import 
MPSOps -from ._cupy_allocators import cupy_tensorflow_allocator, cupy_pytorch_allocator -from ._param_server import ParamServer -from ..util import assert_tensorflow_installed, assert_pytorch_installed -from ..util import get_torch_default_device, is_cupy_array, require_cpu from .. import registry from ..compat import cupy, has_cupy - +from ..util import ( + assert_pytorch_installed, + assert_tensorflow_installed, + get_torch_default_device, + is_cupy_array, + require_cpu, +) +from ._cupy_allocators import cupy_pytorch_allocator, cupy_tensorflow_allocator +from ._param_server import ParamServer +from .cupy_ops import CupyOps +from .mps_ops import MPSOps +from .numpy_ops import NumpyOps +from .ops import Ops context_ops: ContextVar[Optional[Ops]] = ContextVar("context_ops", default=None) context_pools: ContextVar[dict] = ContextVar("context_pools", default={}) diff --git a/thinc/backends/_cupy_allocators.py b/thinc/backends/_cupy_allocators.py index f2b6faee9..77c958e36 100644 --- a/thinc/backends/_cupy_allocators.py +++ b/thinc/backends/_cupy_allocators.py @@ -1,8 +1,8 @@ from typing import cast +from ..compat import cupy, tensorflow, torch from ..types import ArrayXd from ..util import get_torch_default_device, tensorflow2xp -from ..compat import torch, cupy, tensorflow def cupy_tensorflow_allocator(size_in_bytes: int): diff --git a/thinc/backends/_custom_kernels.py b/thinc/backends/_custom_kernels.py index 0b868e6d6..fa837017d 100644 --- a/thinc/backends/_custom_kernels.py +++ b/thinc/backends/_custom_kernels.py @@ -1,12 +1,13 @@ -from typing import Callable, Optional, Tuple -from functools import reduce -import numpy import operator import re -from pathlib import Path from collections import defaultdict -from ..compat import cupy, has_cupy_gpu +from functools import reduce +from pathlib import Path +from typing import Callable, Optional, Tuple +import numpy + +from ..compat import cupy, has_cupy_gpu PWD = Path(__file__).parent KERNELS_SRC = (PWD / "_custom_kernels.cu").read_text(encoding="utf8") diff --git a/thinc/backends/_param_server.py b/thinc/backends/_param_server.py index 4ce374a4e..db7b5a505 100644 --- a/thinc/backends/_param_server.py +++ b/thinc/backends/_param_server.py @@ -1,9 +1,8 @@ -from typing import Dict, Tuple, Optional, Any +from typing import Any, Dict, Optional, Tuple from ..types import FloatsXd from ..util import get_array_module - KeyT = Tuple[int, str] diff --git a/thinc/backends/cblas.pxd b/thinc/backends/cblas.pxd index 15837e5e7..73cea1f2d 100644 --- a/thinc/backends/cblas.pxd +++ b/thinc/backends/cblas.pxd @@ -1,6 +1,5 @@ from libcpp.memory cimport shared_ptr - ctypedef void (*sgemm_ptr)(bint transA, bint transB, int M, int N, int K, float alpha, const float* A, int lda, const float *B, int ldb, float beta, float* C, int ldc) nogil diff --git a/thinc/backends/cupy_ops.py b/thinc/backends/cupy_ops.py index 506276380..366faf70a 100644 --- a/thinc/backends/cupy_ops.py +++ b/thinc/backends/cupy_ops.py @@ -1,13 +1,20 @@ import numpy + from .. import registry -from .ops import Ops -from .numpy_ops import NumpyOps -from . 
import _custom_kernels -from ..types import DeviceTypes -from ..util import torch2xp, tensorflow2xp, mxnet2xp -from ..util import is_cupy_array -from ..util import is_torch_cuda_array, is_tensorflow_gpu_array, is_mxnet_gpu_array from ..compat import cupy, cupyx +from ..types import DeviceTypes +from ..util import ( + is_cupy_array, + is_mxnet_gpu_array, + is_tensorflow_gpu_array, + is_torch_cuda_array, + mxnet2xp, + tensorflow2xp, + torch2xp, +) +from . import _custom_kernels +from .numpy_ops import NumpyOps +from .ops import Ops @registry.ops("CupyOps") diff --git a/thinc/backends/linalg.pxd b/thinc/backends/linalg.pxd index 494a26c30..37fb9ea2b 100644 --- a/thinc/backends/linalg.pxd +++ b/thinc/backends/linalg.pxd @@ -2,10 +2,9 @@ # cython: cdivision=True cimport cython -from libc.stdint cimport int32_t -from libc.string cimport memset, memcpy from cymem.cymem cimport Pool - +from libc.stdint cimport int32_t +from libc.string cimport memcpy, memset ctypedef float weight_t diff --git a/thinc/backends/mps_ops.py b/thinc/backends/mps_ops.py index 8ebbd4e4b..c6ba71f11 100644 --- a/thinc/backends/mps_ops.py +++ b/thinc/backends/mps_ops.py @@ -1,8 +1,10 @@ from typing import TYPE_CHECKING + import numpy from .. import registry -from . import NumpyOps, Ops +from .numpy_ops import NumpyOps +from .ops import Ops if TYPE_CHECKING: # Type checking does not work with dynamic base classes, since MyPy cannot diff --git a/thinc/backends/numpy_ops.pyx b/thinc/backends/numpy_ops.pyx index c980e6c5d..f64aa29dd 100644 --- a/thinc/backends/numpy_ops.pyx +++ b/thinc/backends/numpy_ops.pyx @@ -1,27 +1,29 @@ # cython: cdivision=True # cython: infer_types=True # cython: profile=True -from typing import Optional from collections.abc import Sized +from typing import Optional + import numpy +cimport blis.cy cimport cython -from libc.string cimport memcpy, memset -from libc.stdlib cimport calloc, malloc, free -from libc.stdint cimport uint32_t, uint64_t -from libc.string cimport memcpy -from libc.math cimport isnan +cimport numpy as np from cymem.cymem cimport Pool -from preshed.maps cimport PreshMap +from libc.math cimport isnan +from libc.stdint cimport uint32_t, uint64_t +from libc.stdlib cimport calloc, free, malloc +from libc.string cimport memcpy, memset from murmurhash.mrmr cimport hash64 -cimport numpy as np -cimport blis.cy +from preshed.maps cimport PreshMap from .. 
import registry +from ..types import ArrayXd, DeviceTypes, DTypes, Shape from ..util import copy_array, get_array_module -from ..types import DeviceTypes, DTypes, Shape, ArrayXd + from .cblas cimport CBlas, daxpy, saxpy -from .linalg cimport VecVec, Vec +from .linalg cimport Vec, VecVec + from .ops import Ops try: diff --git a/thinc/backends/ops.py b/thinc/backends/ops.py index 8bb770023..01bb2f852 100644 --- a/thinc/backends/ops.py +++ b/thinc/backends/ops.py @@ -1,18 +1,53 @@ +import itertools import math +from typing import ( + Any, + Iterator, + List, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, + cast, + overload, +) -from typing import Optional, List, Tuple, Sequence, Type, Union, cast, TypeVar -from typing import Iterator, overload, Any import numpy -import itertools -from ..types import Xp, Shape, DTypes, DTypesInt, DTypesFloat, List2d, ArrayXd -from ..types import Floats1d, Floats2d, Floats3d, Floats4d -from ..types import Array1d, Array2d, Array3d, Array4d, ListXd -from ..types import FloatsXd, Ints1d, Ints2d, Ints3d, Ints4d, IntsXd, _Floats -from ..types import FloatsXdT -from ..types import DeviceTypes, Generator, Padded, Batchable, SizedGenerator +from ..types import ( + Array1d, + Array2d, + Array3d, + Array4d, + ArrayXd, + Batchable, + DeviceTypes, + DTypes, + DTypesFloat, + DTypesInt, + Floats1d, + Floats2d, + Floats3d, + Floats4d, + FloatsXd, + FloatsXdT, + Generator, + Ints1d, + Ints2d, + Ints3d, + Ints4d, + IntsXd, + List2d, + ListXd, + Padded, + Shape, + SizedGenerator, + Xp, + _Floats, +) from ..util import get_array_module, is_xp_array, to_numpy - from .cblas import CBlas ArrayT = TypeVar("ArrayT", bound=ArrayXd) diff --git a/thinc/compat.py b/thinc/compat.py index 54421e187..52a73669f 100644 --- a/thinc/compat.py +++ b/thinc/compat.py @@ -27,8 +27,8 @@ try: # pragma: no cover - import torch.utils.dlpack import torch + import torch.utils.dlpack has_torch = True has_torch_cuda_gpu = torch.cuda.device_count() != 0 @@ -51,8 +51,8 @@ torch_version = Version("0.0.0") try: # pragma: no cover - import tensorflow.experimental.dlpack import tensorflow + import tensorflow.experimental.dlpack has_tensorflow = True has_tensorflow_gpu = len(tensorflow.config.get_visible_devices("GPU")) > 0 diff --git a/thinc/config.py b/thinc/config.py index e5452819b..434c96085 100644 --- a/thinc/config.py +++ b/thinc/config.py @@ -1,6 +1,7 @@ import catalogue import confection -from confection import Config, ConfigValidationError, Promise, VARIABLE_RE +from confection import VARIABLE_RE, Config, ConfigValidationError, Promise + from .types import Decorator diff --git a/thinc/extra/search.pxd b/thinc/extra/search.pxd index daccbf58e..a27ba0525 100644 --- a/thinc/extra/search.pxd +++ b/thinc/extra/search.pxd @@ -1,7 +1,5 @@ from cymem.cymem cimport Pool - -from libc.stdint cimport uint32_t -from libc.stdint cimport uint64_t +from libc.stdint cimport uint32_t, uint64_t from libcpp.pair cimport pair from libcpp.queue cimport priority_queue from libcpp.vector cimport vector diff --git a/thinc/extra/search.pyx b/thinc/extra/search.pyx index d69756551..71cc85d8b 100644 --- a/thinc/extra/search.pyx +++ b/thinc/extra/search.pyx @@ -1,7 +1,8 @@ # cython: profile=True, experimental_cpp_class_def=True, cdivision=True, infer_types=True cimport cython -from libc.string cimport memset, memcpy -from libc.math cimport log, exp +from libc.math cimport exp, log +from libc.string cimport memcpy, memset + import math from cymem.cymem cimport Pool diff --git a/thinc/extra/tests/c_test_search.pyx 
b/thinc/extra/tests/c_test_search.pyx index a727d3364..70cdf5745 100644 --- a/thinc/extra/tests/c_test_search.pyx +++ b/thinc/extra/tests/c_test_search.pyx @@ -1,5 +1,6 @@ -from thinc.extra.search cimport Beam from cymem.cymem cimport Pool + +from thinc.extra.search cimport Beam from thinc.typedefs cimport class_t, weight_t diff --git a/thinc/initializers.py b/thinc/initializers.py index 1333911a3..feb02889d 100644 --- a/thinc/initializers.py +++ b/thinc/initializers.py @@ -1,4 +1,5 @@ from typing import Callable, cast + import numpy from .backends import Ops diff --git a/thinc/layers/__init__.py b/thinc/layers/__init__.py index 4b73a2dce..032af5fde 100644 --- a/thinc/layers/__init__.py +++ b/thinc/layers/__init__.py @@ -1,48 +1,48 @@ # Weights layers +# Combinators +from .add import add + +# Array manipulation +from .array_getitem import array_getitem +from .bidirectional import bidirectional from .cauchysimilarity import CauchySimilarity +from .chain import chain +from .clipped_linear import ClippedLinear, HardSigmoid, HardTanh, ReluK +from .clone import clone +from .concatenate import concatenate from .dish import Dish from .dropout import Dropout from .embed import Embed from .expand_window import expand_window +from .gelu import Gelu +from .hard_swish import HardSwish +from .hard_swish_mobilenet import HardSwishMobilenet from .hashembed import HashEmbed from .layernorm import LayerNorm from .linear import Linear -from .lstm import LSTM, PyTorchLSTM + +# Data-type transfers +from .list2array import list2array +from .list2padded import list2padded +from .list2ragged import list2ragged from .logistic import Logistic +from .lstm import LSTM, PyTorchLSTM +from .map_list import map_list from .maxout import Maxout from .mish import Mish from .multisoftmax import MultiSoftmax -from .parametricattention import ParametricAttention -from .pytorchwrapper import PyTorchWrapper, PyTorchWrapper_v2, PyTorchWrapper_v3 -from .pytorchwrapper import PyTorchRNNWrapper -from .relu import Relu -from .clipped_linear import ClippedLinear, ReluK, HardSigmoid, HardTanh -from .hard_swish import HardSwish -from .hard_swish_mobilenet import HardSwishMobilenet -from .swish import Swish -from .gelu import Gelu -from .resizable import resizable -from .sigmoid_activation import sigmoid_activation -from .sigmoid import Sigmoid -from .softmax_activation import softmax_activation -from .softmax import Softmax, Softmax_v2 -from .sparselinear import SparseLinear, SparseLinear_v2 -from .tensorflowwrapper import TensorFlowWrapper, keras_subclass -from .torchscriptwrapper import TorchScriptWrapper_v1, pytorch_to_torchscript_wrapper from .mxnetwrapper import MXNetWrapper - -# Combinators -from .add import add -from .bidirectional import bidirectional -from .chain import chain -from .clone import clone -from .concatenate import concatenate -from .map_list import map_list from .noop import noop -from .residual import residual -from .uniqued import uniqued -from .siamese import siamese -from .tuplify import tuplify +from .padded2list import padded2list +from .parametricattention import ParametricAttention +from .premap_ids import premap_ids +from .pytorchwrapper import ( + PyTorchRNNWrapper, + PyTorchWrapper, + PyTorchWrapper_v2, + PyTorchWrapper_v3, +) +from .ragged2list import ragged2list # Pooling from .reduce_first import reduce_first @@ -50,34 +50,36 @@ from .reduce_max import reduce_max from .reduce_mean import reduce_mean from .reduce_sum import reduce_sum - -# Array manipulation -from .array_getitem import array_getitem - 
-# Data-type transfers -from .list2array import list2array -from .list2ragged import list2ragged -from .list2padded import list2padded -from .ragged2list import ragged2list -from .padded2list import padded2list +from .relu import Relu from .remap_ids import remap_ids, remap_ids_v2 -from .premap_ids import premap_ids +from .residual import residual +from .resizable import resizable +from .siamese import siamese +from .sigmoid import Sigmoid +from .sigmoid_activation import sigmoid_activation +from .softmax import Softmax, Softmax_v2 +from .softmax_activation import softmax_activation +from .sparselinear import SparseLinear, SparseLinear_v2 from .strings2arrays import strings2arrays +from .swish import Swish +from .tensorflowwrapper import TensorFlowWrapper, keras_subclass +from .torchscriptwrapper import TorchScriptWrapper_v1, pytorch_to_torchscript_wrapper +from .tuplify import tuplify +from .uniqued import uniqued from .with_array import with_array from .with_array2d import with_array2d from .with_cpu import with_cpu +from .with_debug import with_debug from .with_flatten import with_flatten from .with_flatten_v2 import with_flatten_v2 -from .with_padded import with_padded +from .with_getitem import with_getitem from .with_list import with_list +from .with_nvtx_range import with_nvtx_range +from .with_padded import with_padded from .with_ragged import with_ragged from .with_reshape import with_reshape -from .with_getitem import with_getitem -from .with_debug import with_debug -from .with_nvtx_range import with_nvtx_range from .with_signpost_interval import with_signpost_interval - # fmt: off __all__ = [ "CauchySimilarity", diff --git a/thinc/layers/add.py b/thinc/layers/add.py index 60b1f46b9..a3aa1af17 100644 --- a/thinc/layers/add.py +++ b/thinc/layers/add.py @@ -1,11 +1,10 @@ -from typing import Any, Tuple, Callable, Optional, TypeVar, Dict +from typing import Any, Callable, Dict, Optional, Tuple, TypeVar -from ..model import Model from ..config import registry +from ..model import Model from ..types import ArrayXd, XY_XY_OutT from ..util import get_width - InT = TypeVar("InT", bound=Any) OutT = TypeVar("OutT", bound=ArrayXd) diff --git a/thinc/layers/array_getitem.py b/thinc/layers/array_getitem.py index 17ffcb7ee..219b4ea1c 100644 --- a/thinc/layers/array_getitem.py +++ b/thinc/layers/array_getitem.py @@ -1,7 +1,7 @@ -from typing import Union, Sequence, Tuple, TypeVar -from ..types import ArrayXd, FloatsXd, IntsXd -from ..model import Model +from typing import Sequence, Tuple, TypeVar, Union +from ..model import Model +from ..types import ArrayXd, FloatsXd, IntsXd AxisIndex = Union[int, slice, Sequence[int]] Index = Union[AxisIndex, Tuple[AxisIndex, ...]] diff --git a/thinc/layers/bidirectional.py b/thinc/layers/bidirectional.py index 1ff73f013..8cea04e30 100644 --- a/thinc/layers/bidirectional.py +++ b/thinc/layers/bidirectional.py @@ -1,11 +1,10 @@ -from typing import Optional, Tuple, Callable, cast +from typing import Callable, Optional, Tuple, cast from ..backends import Ops -from ..model import Model from ..config import registry +from ..model import Model from ..types import Padded - InT = Padded OutT = Padded diff --git a/thinc/layers/cauchysimilarity.py b/thinc/layers/cauchysimilarity.py index 25af8d9df..57e5932ec 100644 --- a/thinc/layers/cauchysimilarity.py +++ b/thinc/layers/cauchysimilarity.py @@ -1,11 +1,10 @@ -from typing import Tuple, Callable, Optional, cast +from typing import Callable, Optional, Tuple, cast -from ..model import Model from ..config import registry 
+from ..model import Model from ..types import Floats1d, Floats2d from ..util import get_width - InT = Tuple[Floats2d, Floats2d] OutT = Floats1d diff --git a/thinc/layers/chain.py b/thinc/layers/chain.py index 258ee0902..a7e3ee7da 100644 --- a/thinc/layers/chain.py +++ b/thinc/layers/chain.py @@ -1,10 +1,9 @@ -from typing import Tuple, Callable, Optional, TypeVar, Any, Dict, List, cast +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, cast -from ..model import Model from ..config import registry -from ..util import get_width +from ..model import Model from ..types import XY_YZ_OutT - +from ..util import get_width InT = TypeVar("InT") MidT = TypeVar("MidT") diff --git a/thinc/layers/clipped_linear.py b/thinc/layers/clipped_linear.py index 34bb8ade8..efe295fa6 100644 --- a/thinc/layers/clipped_linear.py +++ b/thinc/layers/clipped_linear.py @@ -1,13 +1,13 @@ -from typing import Tuple, Optional, Callable, cast +from typing import Callable, Optional, Tuple, cast from ..config import registry +from ..initializers import glorot_uniform_init, zero_init from ..model import Model +from ..types import Floats1d, Floats2d +from ..util import get_width, partial from .chain import chain -from .layernorm import LayerNorm from .dropout import Dropout -from ..types import Floats1d, Floats2d -from ..util import partial, get_width -from ..initializers import glorot_uniform_init, zero_init +from .layernorm import LayerNorm @registry.layers("ClippedLinear.v1") diff --git a/thinc/layers/clone.py b/thinc/layers/clone.py index 8b433407d..1758f5fe7 100644 --- a/thinc/layers/clone.py +++ b/thinc/layers/clone.py @@ -1,10 +1,9 @@ -from typing import TypeVar, cast, List +from typing import List, TypeVar, cast -from .noop import noop -from .chain import chain -from ..model import Model from ..config import registry - +from ..model import Model +from .chain import chain +from .noop import noop InT = TypeVar("InT") OutT = TypeVar("OutT") diff --git a/thinc/layers/concatenate.py b/thinc/layers/concatenate.py index 4cce96954..e810cefc3 100644 --- a/thinc/layers/concatenate.py +++ b/thinc/layers/concatenate.py @@ -1,14 +1,22 @@ -from typing import Any, List, Tuple, Callable, Optional -from typing import TypeVar, cast, Dict, Union, Sequence +from typing import ( + Any, + Callable, + Dict, + List, + Optional, + Sequence, + Tuple, + TypeVar, + Union, + cast, +) from ..backends import NumpyOps -from ..model import Model from ..config import registry -from ..types import Array2d, Ragged +from ..model import Model +from ..types import Array2d, Ragged, XY_XY_OutT from ..util import get_width from .noop import noop -from ..types import XY_XY_OutT - NUMPY_OPS = NumpyOps() diff --git a/thinc/layers/dish.py b/thinc/layers/dish.py index 1092638e7..dc871ad24 100644 --- a/thinc/layers/dish.py +++ b/thinc/layers/dish.py @@ -1,13 +1,13 @@ -from typing import Tuple, Optional, Callable, cast +from typing import Callable, Optional, Tuple, cast from ..config import registry +from ..initializers import he_normal_init, zero_init from ..model import Model +from ..types import Floats1d, Floats2d +from ..util import get_width, partial from .chain import chain -from .layernorm import LayerNorm from .dropout import Dropout -from ..types import Floats1d, Floats2d -from ..util import partial, get_width -from ..initializers import he_normal_init, zero_init +from .layernorm import LayerNorm @registry.layers("Dish.v1") diff --git a/thinc/layers/dropout.py b/thinc/layers/dropout.py index f4fa29445..7db35261a 100644 --- 
a/thinc/layers/dropout.py +++ b/thinc/layers/dropout.py @@ -1,9 +1,8 @@ -from typing import Tuple, Callable, List, TypeVar, cast, Union, Sequence +from typing import Callable, List, Sequence, Tuple, TypeVar, Union, cast -from ..model import Model from ..config import registry -from ..types import ArrayXd, Ragged, Padded - +from ..model import Model +from ..types import ArrayXd, Padded, Ragged InT = TypeVar("InT", bound=Union[ArrayXd, Sequence[ArrayXd], Ragged, Padded]) diff --git a/thinc/layers/embed.py b/thinc/layers/embed.py index 703baf475..9d8d34e4a 100644 --- a/thinc/layers/embed.py +++ b/thinc/layers/embed.py @@ -1,13 +1,12 @@ -from typing import Dict, Callable, Tuple, Optional, Union, cast, TypeVar +from typing import Callable, Dict, Optional, Tuple, TypeVar, Union, cast -from .chain import chain -from .array_getitem import ints_getitem -from ..model import Model from ..config import registry -from ..types import Ints1d, Ints2d, Floats1d, Floats2d from ..initializers import uniform_init +from ..model import Model +from ..types import Floats1d, Floats2d, Ints1d, Ints2d from ..util import get_width, partial - +from .array_getitem import ints_getitem +from .chain import chain InT = TypeVar("InT", bound=Union[Ints1d, Ints2d]) OutT = Floats2d diff --git a/thinc/layers/expand_window.py b/thinc/layers/expand_window.py index 1075a49a2..193b82d39 100644 --- a/thinc/layers/expand_window.py +++ b/thinc/layers/expand_window.py @@ -1,10 +1,9 @@ -from typing import Tuple, TypeVar, Callable, Union, cast +from typing import Callable, Tuple, TypeVar, Union, cast -from ..model import Model from ..config import registry +from ..model import Model from ..types import Floats2d, Ragged - InT = TypeVar("InT", Floats2d, Ragged) diff --git a/thinc/layers/gelu.py b/thinc/layers/gelu.py index 686b1f0d8..f51ee4545 100644 --- a/thinc/layers/gelu.py +++ b/thinc/layers/gelu.py @@ -1,13 +1,13 @@ -from typing import Tuple, Optional, Callable, cast +from typing import Callable, Optional, Tuple, cast from ..config import registry +from ..initializers import he_normal_init, zero_init from ..model import Model +from ..types import Floats1d, Floats2d +from ..util import get_width, partial from .chain import chain -from .layernorm import LayerNorm from .dropout import Dropout -from ..types import Floats1d, Floats2d -from ..util import partial, get_width -from ..initializers import he_normal_init, zero_init +from .layernorm import LayerNorm @registry.layers("Gelu.v1") diff --git a/thinc/layers/hard_swish.py b/thinc/layers/hard_swish.py index 773314a38..2fc135e41 100644 --- a/thinc/layers/hard_swish.py +++ b/thinc/layers/hard_swish.py @@ -1,13 +1,13 @@ -from typing import Tuple, Optional, Callable, cast +from typing import Callable, Optional, Tuple, cast from ..config import registry +from ..initializers import he_normal_init, zero_init from ..model import Model +from ..types import Floats1d, Floats2d +from ..util import get_width, partial from .chain import chain -from .layernorm import LayerNorm from .dropout import Dropout -from ..types import Floats1d, Floats2d -from ..util import partial, get_width -from ..initializers import he_normal_init, zero_init +from .layernorm import LayerNorm @registry.layers("HardSwish.v1") diff --git a/thinc/layers/hard_swish_mobilenet.py b/thinc/layers/hard_swish_mobilenet.py index 9f5f3fb9f..400622497 100644 --- a/thinc/layers/hard_swish_mobilenet.py +++ b/thinc/layers/hard_swish_mobilenet.py @@ -1,13 +1,13 @@ -from typing import Tuple, Optional, Callable, cast +from typing import 
Callable, Optional, Tuple, cast from ..config import registry +from ..initializers import he_normal_init, zero_init from ..model import Model +from ..types import Floats1d, Floats2d +from ..util import get_width, partial from .chain import chain -from .layernorm import LayerNorm from .dropout import Dropout -from ..types import Floats1d, Floats2d -from ..util import partial, get_width -from ..initializers import he_normal_init, zero_init +from .layernorm import LayerNorm @registry.layers("HardSwishMobilenet.v1") diff --git a/thinc/layers/hashembed.py b/thinc/layers/hashembed.py index 8c85fdb02..7ecd9b26a 100644 --- a/thinc/layers/hashembed.py +++ b/thinc/layers/hashembed.py @@ -1,13 +1,12 @@ -from typing import Callable, Dict, Tuple, Optional, Any, Union, cast, TypeVar +from typing import Any, Callable, Dict, Optional, Tuple, TypeVar, Union, cast -from .chain import chain -from .array_getitem import ints_getitem -from ..model import Model from ..config import registry -from ..types import Floats1d, Floats2d, Ints2d, Ints1d from ..initializers import uniform_init +from ..model import Model +from ..types import Floats1d, Floats2d, Ints1d, Ints2d from ..util import partial - +from .array_getitem import ints_getitem +from .chain import chain InT = TypeVar("InT", bound=Union[Ints1d, Ints2d]) OutT = Floats2d diff --git a/thinc/layers/layernorm.py b/thinc/layers/layernorm.py index 684489c54..2090ed9a8 100644 --- a/thinc/layers/layernorm.py +++ b/thinc/layers/layernorm.py @@ -1,12 +1,11 @@ -from typing import Tuple, Callable, Optional, cast +from typing import Callable, Optional, Tuple, cast -from ..model import Model +from ..backends import Ops from ..config import registry +from ..model import Model from ..types import Floats2d -from ..backends import Ops from ..util import get_width - InT = Floats2d diff --git a/thinc/layers/linear.py b/thinc/layers/linear.py index bbf7b7874..ef24ec044 100644 --- a/thinc/layers/linear.py +++ b/thinc/layers/linear.py @@ -1,12 +1,11 @@ -from typing import Tuple, Callable, Optional, cast +from typing import Callable, Optional, Tuple, cast -from ..model import Model from ..config import registry -from ..types import Floats1d, Floats2d from ..initializers import glorot_uniform_init, zero_init +from ..model import Model +from ..types import Floats1d, Floats2d from ..util import get_width, partial - InT = Floats2d OutT = Floats2d diff --git a/thinc/layers/list2array.py b/thinc/layers/list2array.py index a52d6e6c6..a31d5d80d 100644 --- a/thinc/layers/list2array.py +++ b/thinc/layers/list2array.py @@ -1,11 +1,10 @@ -from typing import Tuple, Callable, TypeVar, List +from typing import Callable, List, Tuple, TypeVar from ..backends import NumpyOps -from ..model import Model from ..config import registry +from ..model import Model from ..types import Array2d - NUMPY_OPS = NumpyOps() diff --git a/thinc/layers/list2padded.py b/thinc/layers/list2padded.py index 2a02f90e0..e98e88a5c 100644 --- a/thinc/layers/list2padded.py +++ b/thinc/layers/list2padded.py @@ -1,9 +1,8 @@ -from typing import Tuple, Callable, TypeVar, cast +from typing import Callable, Tuple, TypeVar, cast -from ..types import Padded, List2d -from ..model import Model from ..config import registry - +from ..model import Model +from ..types import List2d, Padded InT = TypeVar("InT", bound=List2d) OutT = Padded diff --git a/thinc/layers/list2ragged.py b/thinc/layers/list2ragged.py index a63237dfe..25ad7bed3 100644 --- a/thinc/layers/list2ragged.py +++ b/thinc/layers/list2ragged.py @@ -1,9 +1,8 @@ -from typing 
import Tuple, List, Callable, cast, TypeVar +from typing import Callable, List, Tuple, TypeVar, cast -from ..model import Model from ..config import registry -from ..types import ListXd, ArrayXd, Ragged - +from ..model import Model +from ..types import ArrayXd, ListXd, Ragged InT = TypeVar("InT", bound=ListXd) OutT = Ragged diff --git a/thinc/layers/logistic.py b/thinc/layers/logistic.py index cda0c7dd5..43d45a330 100644 --- a/thinc/layers/logistic.py +++ b/thinc/layers/logistic.py @@ -1,10 +1,9 @@ -from typing import Tuple, Callable +from typing import Callable, Tuple -from ..model import Model from ..config import registry +from ..model import Model from ..types import Floats2d - InT = Floats2d OutT = Floats2d diff --git a/thinc/layers/lstm.py b/thinc/layers/lstm.py index 266fee6e3..c817cd4db 100644 --- a/thinc/layers/lstm.py +++ b/thinc/layers/lstm.py @@ -1,13 +1,13 @@ -from typing import Optional, Tuple, Callable, cast from functools import partial +from typing import Callable, Optional, Tuple, cast -from ..model import Model +from ..backends import Ops from ..config import registry -from ..util import get_width +from ..initializers import glorot_uniform_init, zero_init +from ..model import Model from ..types import Floats1d, Floats2d, Floats4d, Padded, Ragged +from ..util import get_width from .noop import noop -from ..initializers import glorot_uniform_init, zero_init -from ..backends import Ops @registry.layers("LSTM.v1") @@ -45,8 +45,9 @@ def PyTorchLSTM( nO: int, nI: int, *, bi: bool = False, depth: int = 1, dropout: float = 0.0 ) -> Model[Padded, Padded]: import torch.nn - from .with_padded import with_padded + from .pytorchwrapper import PyTorchRNNWrapper + from .with_padded import with_padded if depth == 0: return noop() # type: ignore[misc] diff --git a/thinc/layers/map_list.py b/thinc/layers/map_list.py index b05a934b1..aaadf0b55 100644 --- a/thinc/layers/map_list.py +++ b/thinc/layers/map_list.py @@ -1,6 +1,6 @@ -from typing import Callable, TypeVar, List, Tuple, Optional -from ..model import Model +from typing import Callable, List, Optional, Tuple, TypeVar +from ..model import Model InT = TypeVar("InT") OutT = TypeVar("OutT") diff --git a/thinc/layers/maxout.py b/thinc/layers/maxout.py index 72788a5c7..ff0e52037 100644 --- a/thinc/layers/maxout.py +++ b/thinc/layers/maxout.py @@ -1,14 +1,13 @@ -from typing import Tuple, Callable, Optional, cast +from typing import Callable, Optional, Tuple, cast -from ..model import Model from ..config import registry from ..initializers import glorot_uniform_init, zero_init +from ..model import Model from ..types import Floats2d from ..util import get_width, partial +from .chain import chain from .dropout import Dropout from .layernorm import LayerNorm -from .chain import chain - InT = Floats2d OutT = Floats2d diff --git a/thinc/layers/mish.py b/thinc/layers/mish.py index ab7a2a76c..32542b963 100644 --- a/thinc/layers/mish.py +++ b/thinc/layers/mish.py @@ -1,14 +1,13 @@ -from typing import Tuple, Callable, Optional, cast +from typing import Callable, Optional, Tuple, cast -from ..model import Model -from ..initializers import glorot_uniform_init, zero_init from ..config import registry +from ..initializers import glorot_uniform_init, zero_init +from ..model import Model from ..types import Floats1d, Floats2d from ..util import get_width, partial from .chain import chain -from .layernorm import LayerNorm from .dropout import Dropout - +from .layernorm import LayerNorm InT = Floats2d OutT = Floats2d diff --git a/thinc/layers/multisoftmax.py 
b/thinc/layers/multisoftmax.py index cf55ecc37..d07b684f4 100644 --- a/thinc/layers/multisoftmax.py +++ b/thinc/layers/multisoftmax.py @@ -1,11 +1,10 @@ -from typing import Optional, Tuple, Callable, cast +from typing import Callable, Optional, Tuple, cast -from ..types import Floats2d, Floats1d -from ..model import Model from ..config import registry +from ..model import Model +from ..types import Floats1d, Floats2d from ..util import get_width - InT = Floats2d OutT = Floats2d diff --git a/thinc/layers/mxnetwrapper.py b/thinc/layers/mxnetwrapper.py index 642d01f38..2303871fb 100644 --- a/thinc/layers/mxnetwrapper.py +++ b/thinc/layers/mxnetwrapper.py @@ -1,11 +1,10 @@ -from typing import Callable, Tuple, Optional, Any, Type +from typing import Any, Callable, Optional, Tuple, Type +from ..config import registry from ..model import Model from ..shims import MXNetShim -from ..config import registry -from ..util import is_xp_array, is_mxnet_array -from ..util import mxnet2xp, xp2mxnet, convert_recursive from ..types import ArgsKwargs +from ..util import convert_recursive, is_mxnet_array, is_xp_array, mxnet2xp, xp2mxnet @registry.layers("MXNetWrapper.v1") diff --git a/thinc/layers/noop.py b/thinc/layers/noop.py index d1c83d1cd..2e855b875 100644 --- a/thinc/layers/noop.py +++ b/thinc/layers/noop.py @@ -1,8 +1,7 @@ -from typing import Tuple, Callable, TypeVar +from typing import Callable, Tuple, TypeVar -from ..model import Model from ..config import registry - +from ..model import Model InOutT = TypeVar("InOutT") diff --git a/thinc/layers/padded2list.py b/thinc/layers/padded2list.py index 8f1bee7e8..a4d374e6b 100644 --- a/thinc/layers/padded2list.py +++ b/thinc/layers/padded2list.py @@ -1,9 +1,8 @@ -from typing import Tuple, Callable, TypeVar, cast +from typing import Callable, Tuple, TypeVar, cast -from ..types import Padded, List2d -from ..model import Model from ..config import registry - +from ..model import Model +from ..types import List2d, Padded InT = Padded OutT = TypeVar("OutT", bound=List2d) diff --git a/thinc/layers/parametricattention.py b/thinc/layers/parametricattention.py index d54a2f19e..a03906f51 100644 --- a/thinc/layers/parametricattention.py +++ b/thinc/layers/parametricattention.py @@ -1,11 +1,10 @@ -from typing import Tuple, Callable, Optional +from typing import Callable, Optional, Tuple -from ..model import Model from ..config import registry +from ..model import Model from ..types import Ragged from ..util import get_width - InT = Ragged OutT = Ragged diff --git a/thinc/layers/premap_ids.pyx b/thinc/layers/premap_ids.pyx index 74bc8dc6a..17acafa8e 100644 --- a/thinc/layers/premap_ids.pyx +++ b/thinc/layers/premap_ids.pyx @@ -1,13 +1,15 @@ # cython: binding=True, infer_types=True import numpy + from preshed.maps cimport PreshMap -from typing import Dict, Union, Optional, cast, Callable, Tuple, Mapping -from ..types import Ints1d, Ints2d + +from typing import Callable, Dict, Mapping, Optional, Tuple, Union, cast + from ..config import registry from ..model import Model +from ..types import Ints1d, Ints2d from ..util import to_numpy - InT = Union[Ints1d, Ints2d] OutT = Ints2d diff --git a/thinc/layers/pytorchwrapper.py b/thinc/layers/pytorchwrapper.py index a1b0c462a..39c8b95c1 100644 --- a/thinc/layers/pytorchwrapper.py +++ b/thinc/layers/pytorchwrapper.py @@ -1,12 +1,18 @@ -from typing import Callable, Dict, Tuple, Optional, Any, cast +from typing import Any, Callable, Dict, Optional, Tuple, cast from ..compat import torch +from ..config import registry from ..model 
import Model from ..shims import PyTorchGradScaler, PyTorchShim -from ..config import registry -from ..util import is_xp_array, is_torch_array, partial -from ..util import xp2torch, torch2xp, convert_recursive -from ..types import Floats3d, ArgsKwargs, Padded +from ..types import ArgsKwargs, Floats3d, Padded +from ..util import ( + convert_recursive, + is_torch_array, + is_xp_array, + partial, + torch2xp, + xp2torch, +) @registry.layers("PyTorchRNNWrapper.v1") diff --git a/thinc/layers/ragged2list.py b/thinc/layers/ragged2list.py index 35af28f2f..3d8463f11 100644 --- a/thinc/layers/ragged2list.py +++ b/thinc/layers/ragged2list.py @@ -1,9 +1,8 @@ -from typing import Tuple, Callable, TypeVar, cast +from typing import Callable, Tuple, TypeVar, cast -from ..model import Model from ..config import registry -from ..types import Ragged, ListXd - +from ..model import Model +from ..types import ListXd, Ragged InT = Ragged OutT = TypeVar("OutT", bound=ListXd) diff --git a/thinc/layers/reduce_first.py b/thinc/layers/reduce_first.py index ab72cb5e3..ede42c5d0 100644 --- a/thinc/layers/reduce_first.py +++ b/thinc/layers/reduce_first.py @@ -1,11 +1,10 @@ from typing import Callable, Tuple, cast -from ..model import Model from ..config import registry -from ..types import Ragged, Floats2d +from ..model import Model +from ..types import Floats2d, Ragged from ..util import ArrayInfo - InT = Ragged OutT = Floats2d diff --git a/thinc/layers/reduce_last.py b/thinc/layers/reduce_last.py index b8194ec2b..d2de6a877 100644 --- a/thinc/layers/reduce_last.py +++ b/thinc/layers/reduce_last.py @@ -1,8 +1,8 @@ from typing import Callable, Tuple, cast -from ..model import Model from ..config import registry -from ..types import Ragged, Floats2d +from ..model import Model +from ..types import Floats2d, Ragged from ..util import ArrayInfo InT = Ragged diff --git a/thinc/layers/reduce_max.py b/thinc/layers/reduce_max.py index ebafb5172..e6f033e48 100644 --- a/thinc/layers/reduce_max.py +++ b/thinc/layers/reduce_max.py @@ -1,11 +1,10 @@ -from typing import Tuple, Callable, cast +from typing import Callable, Tuple, cast -from ..types import Floats2d, Ragged -from ..model import Model from ..config import registry +from ..model import Model +from ..types import Floats2d, Ragged from ..util import ArrayInfo - InT = Ragged OutT = Floats2d diff --git a/thinc/layers/reduce_mean.py b/thinc/layers/reduce_mean.py index f37ae8253..f1bd04898 100644 --- a/thinc/layers/reduce_mean.py +++ b/thinc/layers/reduce_mean.py @@ -1,11 +1,10 @@ -from typing import Tuple, Callable, cast +from typing import Callable, Tuple, cast -from ..types import Floats2d, Ragged -from ..model import Model from ..config import registry +from ..model import Model +from ..types import Floats2d, Ragged from ..util import ArrayInfo - InT = Ragged OutT = Floats2d diff --git a/thinc/layers/reduce_sum.py b/thinc/layers/reduce_sum.py index e93a362d8..62ade00f6 100644 --- a/thinc/layers/reduce_sum.py +++ b/thinc/layers/reduce_sum.py @@ -1,11 +1,10 @@ from typing import Callable, Tuple, cast -from ..model import Model from ..config import registry +from ..model import Model from ..types import Floats2d, Ragged from ..util import ArrayInfo - InT = Ragged OutT = Floats2d diff --git a/thinc/layers/relu.py b/thinc/layers/relu.py index d1d3ebf74..488a1eff7 100644 --- a/thinc/layers/relu.py +++ b/thinc/layers/relu.py @@ -1,14 +1,13 @@ -from typing import Tuple, Callable, Optional, cast +from typing import Callable, Optional, Tuple, cast -from ..model import Model -from 
..initializers import glorot_uniform_init, zero_init from ..config import registry -from ..types import Floats2d, Floats1d +from ..initializers import glorot_uniform_init, zero_init +from ..model import Model +from ..types import Floats1d, Floats2d from ..util import get_width, partial from .chain import chain -from .layernorm import LayerNorm from .dropout import Dropout - +from .layernorm import LayerNorm InT = Floats2d OutT = Floats2d diff --git a/thinc/layers/remap_ids.py b/thinc/layers/remap_ids.py index 265b24a9d..3801b703f 100644 --- a/thinc/layers/remap_ids.py +++ b/thinc/layers/remap_ids.py @@ -1,12 +1,10 @@ -from typing import Tuple, Callable, Sequence, cast -from typing import Dict, Union, Optional, Hashable, Any +from typing import Any, Callable, Dict, Hashable, Optional, Sequence, Tuple, Union, cast -from ..model import Model from ..config import registry -from ..types import Ints1d, Ints2d, DTypes +from ..model import Model +from ..types import DTypes, Ints1d, Ints2d from ..util import is_xp_array, to_numpy - InT = Union[Sequence[Hashable], Ints1d, Ints2d] OutT = Ints2d diff --git a/thinc/layers/residual.py b/thinc/layers/residual.py index 3793ee1d5..f213e9bf5 100644 --- a/thinc/layers/residual.py +++ b/thinc/layers/residual.py @@ -1,8 +1,8 @@ -from typing import Tuple, Callable, Optional, List, TypeVar +from typing import Callable, List, Optional, Tuple, TypeVar -from ..model import Model from ..config import registry -from ..types import Floats1d, Floats2d, Floats3d, Floats4d, FloatsXd, Ragged, Padded +from ..model import Model +from ..types import Floats1d, Floats2d, Floats3d, Floats4d, FloatsXd, Padded, Ragged # fmt: off InT = TypeVar( diff --git a/thinc/layers/resizable.py b/thinc/layers/resizable.py index 2dd4dde1a..606d50dae 100644 --- a/thinc/layers/resizable.py +++ b/thinc/layers/resizable.py @@ -1,7 +1,7 @@ from typing import Callable, Optional, TypeVar -from ..model import Model from ..config import registry +from ..model import Model from ..types import Floats2d InT = TypeVar("InT") diff --git a/thinc/layers/siamese.py b/thinc/layers/siamese.py index 82bafacbb..33579a4de 100644 --- a/thinc/layers/siamese.py +++ b/thinc/layers/siamese.py @@ -1,11 +1,10 @@ -from typing import Tuple, Callable, Optional, TypeVar +from typing import Callable, Optional, Tuple, TypeVar +from ..config import registry from ..model import Model from ..types import ArrayXd -from ..config import registry from ..util import get_width - LayerT = TypeVar("LayerT") SimT = TypeVar("SimT") InT = Tuple[LayerT, LayerT] diff --git a/thinc/layers/sigmoid.py b/thinc/layers/sigmoid.py index d8933b66e..157047e37 100644 --- a/thinc/layers/sigmoid.py +++ b/thinc/layers/sigmoid.py @@ -1,12 +1,11 @@ -from typing import Tuple, Callable, Optional, cast +from typing import Callable, Optional, Tuple, cast -from ..model import Model from ..config import registry -from ..types import Floats2d, Floats1d from ..initializers import zero_init +from ..model import Model +from ..types import Floats1d, Floats2d from ..util import get_width, partial - InT = Floats2d OutT = Floats2d diff --git a/thinc/layers/sigmoid_activation.py b/thinc/layers/sigmoid_activation.py index b87261075..37e188ab8 100644 --- a/thinc/layers/sigmoid_activation.py +++ b/thinc/layers/sigmoid_activation.py @@ -1,7 +1,7 @@ -from typing import TypeVar, Tuple, Callable, cast +from typing import Callable, Tuple, TypeVar, cast -from ..model import Model from ..config import registry +from ..model import Model from ..types import FloatsXdT diff --git 
a/thinc/layers/softmax.py b/thinc/layers/softmax.py index 9d766f1db..8b7301af0 100644 --- a/thinc/layers/softmax.py +++ b/thinc/layers/softmax.py @@ -1,11 +1,10 @@ -from typing import Tuple, Callable, Optional, cast +from typing import Callable, Optional, Tuple, cast -from ..model import Model from ..config import registry -from ..types import Floats2d, Floats1d from ..initializers import zero_init -from ..util import get_width, partial, ArrayInfo - +from ..model import Model +from ..types import Floats1d, Floats2d +from ..util import ArrayInfo, get_width, partial InT = Floats2d OutT = Floats2d diff --git a/thinc/layers/softmax_activation.py b/thinc/layers/softmax_activation.py index 858320143..974ed2c8c 100644 --- a/thinc/layers/softmax_activation.py +++ b/thinc/layers/softmax_activation.py @@ -1,10 +1,9 @@ -from typing import Tuple, Callable +from typing import Callable, Tuple -from ..model import Model from ..config import registry +from ..model import Model from ..types import Floats2d - InT = Floats2d OutT = Floats2d diff --git a/thinc/layers/sparselinear.pyx b/thinc/layers/sparselinear.pyx index b9a982f4b..a1be75ccc 100644 --- a/thinc/layers/sparselinear.pyx +++ b/thinc/layers/sparselinear.pyx @@ -1,16 +1,15 @@ # cython: infer_types=True, cdivision=True, bounds_check=False, wraparound=False -cimport numpy as np -from libc.stdint cimport uint64_t, int32_t, uint32_t cimport cython +cimport numpy as np +from libc.stdint cimport int32_t, uint32_t, uint64_t -from typing import Tuple, Callable, Optional +from typing import Callable, Optional, Tuple -from ..types import ArrayXd -from ..model import Model +from ..backends import CupyOps, NumpyOps from ..config import registry -from ..util import get_width, is_cupy_array, is_numpy_array, get_array_module -from ..backends import NumpyOps, CupyOps - +from ..model import Model +from ..types import ArrayXd +from ..util import get_array_module, get_width, is_cupy_array, is_numpy_array InT = Tuple[ArrayXd, ArrayXd, ArrayXd] OutT = ArrayXd diff --git a/thinc/layers/strings2arrays.py b/thinc/layers/strings2arrays.py index 469b1636d..91a6b1a31 100644 --- a/thinc/layers/strings2arrays.py +++ b/thinc/layers/strings2arrays.py @@ -1,11 +1,11 @@ -from typing import Tuple, List, Callable, Sequence +from typing import Callable, List, Sequence, Tuple + from murmurhash import hash_unicode -from ..model import Model from ..config import registry +from ..model import Model from ..types import Ints2d - InT = Sequence[Sequence[str]] OutT = List[Ints2d] diff --git a/thinc/layers/swish.py b/thinc/layers/swish.py index 4f3fe49d5..5cf8be50f 100644 --- a/thinc/layers/swish.py +++ b/thinc/layers/swish.py @@ -1,13 +1,13 @@ -from typing import Tuple, Optional, Callable, cast +from typing import Callable, Optional, Tuple, cast from ..config import registry +from ..initializers import he_normal_init, zero_init from ..model import Model +from ..types import Floats1d, Floats2d +from ..util import get_width, partial from .chain import chain -from .layernorm import LayerNorm from .dropout import Dropout -from ..types import Floats1d, Floats2d -from ..util import partial, get_width -from ..initializers import he_normal_init, zero_init +from .layernorm import LayerNorm @registry.layers("Swish.v1") diff --git a/thinc/layers/tensorflowwrapper.py b/thinc/layers/tensorflowwrapper.py index 7e166ea50..a77e0b3af 100644 --- a/thinc/layers/tensorflowwrapper.py +++ b/thinc/layers/tensorflowwrapper.py @@ -2,12 +2,18 @@ import srsly +from ..compat import tensorflow as tf from ..model import 
Model from ..shims import TensorFlowShim, keras_model_fns, maybe_handshake_model -from ..util import xp2tensorflow, tensorflow2xp, assert_tensorflow_installed -from ..util import is_tensorflow_array, convert_recursive, is_xp_array -from ..types import ArrayXd, ArgsKwargs -from ..compat import tensorflow as tf +from ..types import ArgsKwargs, ArrayXd +from ..util import ( + assert_tensorflow_installed, + convert_recursive, + is_tensorflow_array, + is_xp_array, + tensorflow2xp, + xp2tensorflow, +) InT = TypeVar("InT") OutT = TypeVar("OutT") diff --git a/thinc/layers/torchscriptwrapper.py b/thinc/layers/torchscriptwrapper.py index a74db9225..a3a8e1ac0 100644 --- a/thinc/layers/torchscriptwrapper.py +++ b/thinc/layers/torchscriptwrapper.py @@ -3,8 +3,11 @@ from ..compat import torch from ..model import Model from ..shims import PyTorchGradScaler, PyTorchShim, TorchScriptShim -from .pytorchwrapper import forward, convert_pytorch_default_inputs -from .pytorchwrapper import convert_pytorch_default_outputs +from .pytorchwrapper import ( + convert_pytorch_default_inputs, + convert_pytorch_default_outputs, + forward, +) def TorchScriptWrapper_v1( diff --git a/thinc/layers/tuplify.py b/thinc/layers/tuplify.py index 99b4d7589..35dfdc66f 100644 --- a/thinc/layers/tuplify.py +++ b/thinc/layers/tuplify.py @@ -1,7 +1,7 @@ -from typing import Optional, Tuple, Any, TypeVar +from typing import Any, Optional, Tuple, TypeVar -from ..model import Model from ..config import registry +from ..model import Model InT = TypeVar("InT") OutT = Tuple diff --git a/thinc/layers/uniqued.py b/thinc/layers/uniqued.py index 582b31093..26f2cdf16 100644 --- a/thinc/layers/uniqued.py +++ b/thinc/layers/uniqued.py @@ -1,10 +1,10 @@ -from typing import Tuple, Callable, Optional +from typing import Callable, Optional, Tuple + import numpy -from ..model import Model from ..config import registry -from ..types import Ints2d, Floats2d - +from ..model import Model +from ..types import Floats2d, Ints2d InT = Ints2d OutT = Floats2d diff --git a/thinc/layers/with_array.py b/thinc/layers/with_array.py index 2511b3c17..31b9fa494 100644 --- a/thinc/layers/with_array.py +++ b/thinc/layers/with_array.py @@ -1,10 +1,9 @@ -from typing import Tuple, Callable, Optional, TypeVar, Union, cast +from typing import Callable, Optional, Tuple, TypeVar, Union, cast from ..backends import NumpyOps -from ..model import Model from ..config import registry -from ..types import Padded, Ragged, ArrayXd, Array3d, ListXd - +from ..model import Model +from ..types import Array3d, ArrayXd, ListXd, Padded, Ragged NUMPY_OPS = NumpyOps() diff --git a/thinc/layers/with_array2d.py b/thinc/layers/with_array2d.py index 740593a26..98eba8b96 100644 --- a/thinc/layers/with_array2d.py +++ b/thinc/layers/with_array2d.py @@ -1,11 +1,10 @@ -from typing import Tuple, Callable, Optional, TypeVar, cast, List, Union +from typing import Callable, List, Optional, Tuple, TypeVar, Union, cast from ..backends import NumpyOps -from ..model import Model from ..config import registry +from ..model import Model from ..types import Array2d, Floats2d, List2d, Padded, Ragged - NUMPY_OPS = NumpyOps() diff --git a/thinc/layers/with_cpu.py b/thinc/layers/with_cpu.py index 3fc7645a8..39e5965f2 100644 --- a/thinc/layers/with_cpu.py +++ b/thinc/layers/with_cpu.py @@ -1,10 +1,11 @@ -from typing import Tuple, Callable, Any +from typing import Any, Callable, Tuple import numpy + from thinc.backends import Ops -from ..model import Model from ..config import registry +from ..model import Model 
@registry.layers("with_cpu.v1") diff --git a/thinc/layers/with_debug.py b/thinc/layers/with_debug.py index 91505c9f6..21790e468 100644 --- a/thinc/layers/with_debug.py +++ b/thinc/layers/with_debug.py @@ -1,4 +1,4 @@ -from typing import Optional, Callable, Any, Tuple, TypeVar +from typing import Any, Callable, Optional, Tuple, TypeVar from ..model import Model diff --git a/thinc/layers/with_flatten.py b/thinc/layers/with_flatten.py index 5cf8a85cf..9658a788f 100644 --- a/thinc/layers/with_flatten.py +++ b/thinc/layers/with_flatten.py @@ -1,7 +1,7 @@ -from typing import Tuple, Callable, Sequence, Any, cast, TypeVar, Optional, List +from typing import Any, Callable, List, Optional, Sequence, Tuple, TypeVar, cast -from ..model import Model from ..config import registry +from ..model import Model from ..types import ArrayXd, ListXd ItemT = TypeVar("ItemT") diff --git a/thinc/layers/with_flatten_v2.py b/thinc/layers/with_flatten_v2.py index 4dd75e0d1..95549994f 100644 --- a/thinc/layers/with_flatten_v2.py +++ b/thinc/layers/with_flatten_v2.py @@ -1,8 +1,7 @@ -from typing import Tuple, Callable, Sequence, Any, cast, TypeVar, Optional, List +from typing import Any, Callable, List, Optional, Sequence, Tuple, TypeVar, cast -from ..model import Model from ..config import registry - +from ..model import Model InItemT = TypeVar("InItemT") OutItemT = TypeVar("OutItemT") diff --git a/thinc/layers/with_getitem.py b/thinc/layers/with_getitem.py index 9f6b93459..fb6a3cccf 100644 --- a/thinc/layers/with_getitem.py +++ b/thinc/layers/with_getitem.py @@ -1,8 +1,7 @@ -from typing import Callable, Optional, Tuple, Any +from typing import Any, Callable, Optional, Tuple -from ..model import Model from ..config import registry - +from ..model import Model InT = Tuple[Any, ...] OutT = Tuple[Any, ...] 
diff --git a/thinc/layers/with_list.py b/thinc/layers/with_list.py
index 9f86c24dc..5331758a5 100644
--- a/thinc/layers/with_list.py
+++ b/thinc/layers/with_list.py
@@ -1,8 +1,8 @@
-from typing import Tuple, Callable, List, Optional, TypeVar, Union, cast
+from typing import Callable, List, Optional, Tuple, TypeVar, Union, cast
 
-from ..types import Padded, Ragged, Array2d, List2d, Floats2d, Ints2d
-from ..model import Model
 from ..config import registry
+from ..model import Model
+from ..types import Array2d, Floats2d, Ints2d, List2d, Padded, Ragged
 
 SeqT = TypeVar("SeqT", Padded, Ragged, List2d, List[Floats2d], List[Ints2d])
diff --git a/thinc/layers/with_nvtx_range.py b/thinc/layers/with_nvtx_range.py
index bf270abce..480f82a7c 100644
--- a/thinc/layers/with_nvtx_range.py
+++ b/thinc/layers/with_nvtx_range.py
@@ -1,9 +1,8 @@
-from typing import Optional, Callable, Any, Tuple, TypeVar
+from typing import Any, Callable, Optional, Tuple, TypeVar
 
 from ..model import Model
 from ..util import use_nvtx_range
-
 
 _ModelT = TypeVar("_ModelT", bound=Model)
diff --git a/thinc/layers/with_padded.py b/thinc/layers/with_padded.py
index 379df1bef..b92c6308a 100644
--- a/thinc/layers/with_padded.py
+++ b/thinc/layers/with_padded.py
@@ -1,11 +1,10 @@
-from typing import Tuple, Callable, Optional, TypeVar, Union, cast, List
+from typing import Callable, List, Optional, Tuple, TypeVar, Union, cast
 
-from ..types import Padded, Ragged, Floats3d, Ints1d, List2d, Array2d
-from ..model import Model
 from ..config import registry
+from ..model import Model
+from ..types import Array2d, Floats3d, Ints1d, List2d, Padded, Ragged
 from ..util import is_xp_array
-
 
 PaddedData = Tuple[Floats3d, Ints1d, Ints1d, Ints1d]
 SeqT = TypeVar("SeqT", bound=Union[Padded, Ragged, List2d, Floats3d, PaddedData])
diff --git a/thinc/layers/with_ragged.py b/thinc/layers/with_ragged.py
index cbff6f59d..6cf45d9e8 100644
--- a/thinc/layers/with_ragged.py
+++ b/thinc/layers/with_ragged.py
@@ -1,10 +1,9 @@
-from typing import Tuple, Callable, Optional, TypeVar, cast, List, Union
+from typing import Callable, List, Optional, Tuple, TypeVar, Union, cast
 
 from ..backends import NumpyOps
-from ..types import Padded, Ragged, Array2d, ListXd, List2d, Ints1d
-from ..model import Model
 from ..config import registry
-
+from ..model import Model
+from ..types import Array2d, Ints1d, List2d, ListXd, Padded, Ragged
 
 NUMPY_OPS = NumpyOps()
diff --git a/thinc/layers/with_reshape.py b/thinc/layers/with_reshape.py
index 5bd3e9025..b40ada757 100644
--- a/thinc/layers/with_reshape.py
+++ b/thinc/layers/with_reshape.py
@@ -1,9 +1,8 @@
-from typing import Tuple, Callable, Optional, cast, TypeVar, List
+from typing import Callable, List, Optional, Tuple, TypeVar, cast
 
-from ..model import Model
 from ..config import registry
-from ..types import Array3d, Array2d
-
+from ..model import Model
+from ..types import Array2d, Array3d
 
 InT = TypeVar("InT", bound=Array3d)
 OutT = TypeVar("OutT", bound=Array2d)
diff --git a/thinc/layers/with_signpost_interval.py b/thinc/layers/with_signpost_interval.py
index 9a468d896..58f5d4165 100644
--- a/thinc/layers/with_signpost_interval.py
+++ b/thinc/layers/with_signpost_interval.py
@@ -1,9 +1,8 @@
-from typing import Optional, Callable, Any, Tuple, TypeVar
+from typing import Any, Callable, Optional, Tuple, TypeVar
 
 from ..compat import has_os_signpost, os_signpost
 from ..model import Model
-
 
 _ModelT = TypeVar("_ModelT", bound=Model)
diff --git a/thinc/loss.py b/thinc/loss.py
index e8edb194d..756dac4c3 100644
--- a/thinc/loss.py
+++ b/thinc/loss.py
@@ -1,11 +1,20 @@
-from typing import Tuple, Sequence, cast, TypeVar, Generic, Any, Union, Optional, List
-from typing import Dict
 from abc import abstractmethod
+from typing import (
+    Any,
+    Dict,
+    Generic,
+    List,
+    Optional,
+    Sequence,
+    Tuple,
+    TypeVar,
+    Union,
+    cast,
+)
 
+from .config import registry
 from .types import Floats2d, Ints1d
 from .util import get_array_module, to_categorical
-from .config import registry
-
 
 LossT = TypeVar("LossT")
 GradT = TypeVar("GradT")
diff --git a/thinc/model.py b/thinc/model.py
index e094d5294..ba49215c1 100644
--- a/thinc/model.py
+++ b/thinc/model.py
@@ -1,20 +1,39 @@
-from typing import Dict, List, Callable, Optional, Any, Union, Iterable, Set, cast
-from typing import Generic, Sequence, Tuple, TypeVar, Iterator
 import contextlib
-from contextvars import ContextVar
-import srsly
-from pathlib import Path
 import copy
 import functools
 import threading
+from contextvars import ContextVar
+from pathlib import Path
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Generic,
+    Iterable,
+    Iterator,
+    List,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    TypeVar,
+    Union,
+    cast,
+)
+
+import srsly
 
-from .backends import ParamServer, Ops, NumpyOps, CupyOps, get_current_ops
+from .backends import CupyOps, NumpyOps, Ops, ParamServer, get_current_ops
 from .optimizers import Optimizer  # noqa: F401
 from .shims import Shim
-from .util import convert_recursive, is_xp_array, DATA_VALIDATION
-from .util import partial, validate_fwd_input_output
 from .types import FloatsXd
-
+from .util import (
+    DATA_VALIDATION,
+    convert_recursive,
+    is_xp_array,
+    partial,
+    validate_fwd_input_output,
+)
 
 InT = TypeVar("InT")
 OutT = TypeVar("OutT")
diff --git a/thinc/mypy.py b/thinc/mypy.py
index e02f6d5be..73c6e72f6 100644
--- a/thinc/mypy.py
+++ b/thinc/mypy.py
@@ -1,13 +1,14 @@
-from typing import Dict, List
 import itertools
-from mypy.errors import Errors
+from typing import Dict, List
+
+from mypy.checker import TypeChecker
 from mypy.errorcodes import ErrorCode
+from mypy.errors import Errors
+from mypy.nodes import CallExpr, Decorator, Expression, FuncDef, MypyFile, NameExpr
 from mypy.options import Options
-from mypy.plugin import FunctionContext, Plugin, CheckerPluginInterface
-from mypy.types import Instance, Type, CallableType, TypeVarType
-from mypy.nodes import Expression, CallExpr, NameExpr, FuncDef, Decorator, MypyFile
-from mypy.checker import TypeChecker
+from mypy.plugin import CheckerPluginInterface, FunctionContext, Plugin
 from mypy.subtypes import is_subtype
+from mypy.types import CallableType, Instance, Type, TypeVarType
 
 thinc_model_fullname = "thinc.model.Model"
 chained_out_fullname = "thinc.types.XY_YZ_OutT"
diff --git a/thinc/optimizers.py b/thinc/optimizers.py
index f34cd2ff8..4b4eca2b6 100644
--- a/thinc/optimizers.py
+++ b/thinc/optimizers.py
@@ -1,12 +1,10 @@
 import math
-
-from typing import Dict, Optional, Union, Tuple, List, cast
 from collections import defaultdict
+from typing import Dict, List, Optional, Tuple, Union, cast
 
 from .backends import get_array_ops
-from .types import Generator, FloatsXd
 from .config import registry
-
+from .types import FloatsXd, Generator
 
 KeyT = Tuple[int, str]
 FloatOrSeq = Union[float, List[float], Generator]
diff --git a/thinc/schedules.py b/thinc/schedules.py
index 87581af74..c13868a5d 100644
--- a/thinc/schedules.py
+++ b/thinc/schedules.py
@@ -1,5 +1,6 @@
 """Generators that provide different rates, schedules, decays or series."""
 from typing import Iterable
+
 import numpy
 
 from .config import registry
diff --git a/thinc/shims/__init__.py b/thinc/shims/__init__.py
index 9cd8bd030..fb246c9f2 100644
--- a/thinc/shims/__init__.py
+++ b/thinc/shims/__init__.py
@@ -1,10 +1,9 @@
-from .shim import Shim
+from .mxnet import MXNetShim
 from .pytorch import PyTorchShim
 from .pytorch_grad_scaler import PyTorchGradScaler
-from .tensorflow import keras_model_fns, TensorFlowShim, maybe_handshake_model
+from .shim import Shim
+from .tensorflow import TensorFlowShim, keras_model_fns, maybe_handshake_model
 from .torchscript import TorchScriptShim
-from .mxnet import MXNetShim
-
 
 # fmt: off
 __all__ = [
diff --git a/thinc/shims/mxnet.py b/thinc/shims/mxnet.py
index 3962a2ef5..2dd36a62f 100644
--- a/thinc/shims/mxnet.py
+++ b/thinc/shims/mxnet.py
@@ -1,13 +1,19 @@
+import copy
 from typing import Any, cast
+
 import srsly
-import copy
 
-from ..util import mxnet2xp, convert_recursive, make_tempfile, xp2mxnet
-from ..util import get_array_module
+from ..compat import mxnet as mx
 from ..optimizers import Optimizer
 from ..types import ArgsKwargs, FloatsXd
+from ..util import (
+    convert_recursive,
+    get_array_module,
+    make_tempfile,
+    mxnet2xp,
+    xp2mxnet,
+)
 from .shim import Shim
-from ..compat import mxnet as mx
 
 
 class MXNetShim(Shim):
diff --git a/thinc/shims/pytorch.py b/thinc/shims/pytorch.py
index 9582c8616..505669867 100644
--- a/thinc/shims/pytorch.py
+++ b/thinc/shims/pytorch.py
@@ -1,16 +1,21 @@
-from typing import Any, Dict, Optional, cast, Callable
 import contextlib
-from io import BytesIO
 import itertools
+from io import BytesIO
+from typing import Any, Callable, Dict, Optional, cast
+
 import srsly
 
-from ..util import torch2xp, xp2torch, convert_recursive, iterate_recursive
-from ..util import get_torch_default_device
+from ..backends import CupyOps, context_pools, get_current_ops, set_gpu_allocator
 from ..compat import torch
-from ..backends import get_current_ops, context_pools, CupyOps
-from ..backends import set_gpu_allocator
 from ..optimizers import Optimizer
 from ..types import ArgsKwargs, FloatsXd
+from ..util import (
+    convert_recursive,
+    get_torch_default_device,
+    iterate_recursive,
+    torch2xp,
+    xp2torch,
+)
 from .pytorch_grad_scaler import PyTorchGradScaler
 from .shim import Shim
diff --git a/thinc/shims/shim.py b/thinc/shims/shim.py
index 0c246e8d4..ef88408a3 100644
--- a/thinc/shims/shim.py
+++ b/thinc/shims/shim.py
@@ -1,8 +1,8 @@
-from typing import Any, Optional, Tuple, Callable, Dict, Union
-import copy
 import contextlib
-from pathlib import Path
+import copy
 import threading
+from pathlib import Path
+from typing import Any, Callable, Dict, Optional, Tuple, Union
 
 
 class Shim:  # pragma: no cover
diff --git a/thinc/shims/tensorflow.py b/thinc/shims/tensorflow.py
index d630d86f9..bcaae3aac 100644
--- a/thinc/shims/tensorflow.py
+++ b/thinc/shims/tensorflow.py
@@ -1,17 +1,18 @@
-from typing import Any, Dict, List, Optional
-import catalogue
 import contextlib
 import copy
 from io import BytesIO
+from typing import Any, Dict, List, Optional
+
+import catalogue
 import numpy
 
 from ..backends import Ops, get_current_ops
+from ..compat import cupy, h5py
+from ..compat import tensorflow as tf
 from ..optimizers import Optimizer
 from ..types import ArgsKwargs, ArrayXd
 from ..util import get_array_module
 from .shim import Shim
-from ..compat import tensorflow as tf
-from ..compat import cupy, h5py
 
 keras_model_fns = catalogue.create("thinc", "keras", entry_points=True)
diff --git a/thinc/shims/torchscript.py b/thinc/shims/torchscript.py
index 675718cd1..6c05c8a9b 100644
--- a/thinc/shims/torchscript.py
+++ b/thinc/shims/torchscript.py
@@ -1,5 +1,6 @@
-from typing import Any, Optional
 from io import BytesIO
+from typing import Any, Optional
+
 import srsly
 
 from ..compat import torch
diff --git a/thinc/tests/backends/test_mem.py b/thinc/tests/backends/test_mem.py
index cb26e24e0..bf867726d 100644
--- a/thinc/tests/backends/test_mem.py
+++ b/thinc/tests/backends/test_mem.py
@@ -1,6 +1,7 @@
-from thinc.backends._param_server import ParamServer
 import numpy
 
+from thinc.backends._param_server import ParamServer
+
 
 def test_param_server_init():
     array = numpy.zeros((5,), dtype="f")
diff --git a/thinc/tests/backends/test_ops.py b/thinc/tests/backends/test_ops.py
index ba296e271..d5235ecc3 100644
--- a/thinc/tests/backends/test_ops.py
+++ b/thinc/tests/backends/test_ops.py
@@ -1,26 +1,32 @@
+import inspect
+import platform
 from typing import Tuple, cast
 
-import pytest
 import numpy
-import platform
+import pytest
 from hypothesis import given, settings
 from hypothesis.strategies import composite, integers
 from numpy.testing import assert_allclose
 from packaging.version import Version
-from thinc.api import NumpyOps, CupyOps, Ops, get_ops
-from thinc.api import get_current_ops, use_ops
-from thinc.util import torch2xp, xp2torch
+
+from thinc.api import (
+    LSTM,
+    CupyOps,
+    NumpyOps,
+    Ops,
+    fix_random_seed,
+    get_current_ops,
+    get_ops,
+    use_ops,
+)
+from thinc.backends._custom_kernels import KERNELS, KERNELS_LIST, compile_mmh
 from thinc.compat import has_cupy_gpu, has_torch, torch_version
-from thinc.api import fix_random_seed
-from thinc.api import LSTM
 from thinc.types import Floats2d
-from thinc.backends._custom_kernels import KERNELS_LIST, KERNELS, compile_mmh
-import inspect
+from thinc.util import torch2xp, xp2torch
 
 from .. import strategies
 from ..strategies import arrays_BI, ndarrays_of_shape
-
 
 MAX_EXAMPLES = 10
 
 VANILLA_OPS = Ops(numpy)  # type:ignore
@@ -37,9 +43,10 @@ def create_pytorch_funcs():
-    import torch
     import math
 
+    import torch
+
     def torch_relu(x):
         return torch.nn.functional.relu(x)
diff --git a/thinc/tests/conftest.py b/thinc/tests/conftest.py
index 19b5137d3..026f3eb06 100644
--- a/thinc/tests/conftest.py
+++ b/thinc/tests/conftest.py
@@ -52,9 +52,10 @@ def getopt(opt):
 @pytest.fixture()
 def pathy_fixture():
     pytest.importorskip("pathy")
-    import tempfile
     import shutil
-    from pathy import use_fs, Pathy
+    import tempfile
+
+    from pathy import Pathy, use_fs
 
     temp_folder = tempfile.mkdtemp(prefix="thinc-pathy")
     use_fs(temp_folder)
diff --git a/thinc/tests/layers/test_basic_tagger.py b/thinc/tests/layers/test_basic_tagger.py
index 3046c1b04..855a6d6ad 100644
--- a/thinc/tests/layers/test_basic_tagger.py
+++ b/thinc/tests/layers/test_basic_tagger.py
@@ -1,7 +1,18 @@
-import pytest
 import random
-from thinc.api import Model, Relu, Softmax, HashEmbed, expand_window
-from thinc.api import chain, with_array, Adam, strings2arrays
+
+import pytest
+
+from thinc.api import (
+    Adam,
+    HashEmbed,
+    Model,
+    Relu,
+    Softmax,
+    chain,
+    expand_window,
+    strings2arrays,
+    with_array,
+)
 
 
 @pytest.fixture(scope="module")
diff --git a/thinc/tests/layers/test_combinators.py b/thinc/tests/layers/test_combinators.py
index ea5583108..c7b4fbe9f 100644
--- a/thinc/tests/layers/test_combinators.py
+++ b/thinc/tests/layers/test_combinators.py
@@ -1,8 +1,18 @@
-import pytest
 import numpy
+import pytest
 from numpy.testing import assert_allclose
-from thinc.api import clone, concatenate, noop, add, map_list
-from thinc.api import Linear, Dropout, Model, NumpyOps
+
+from thinc.api import (
+    Dropout,
+    Linear,
+    Model,
+    NumpyOps,
+    add,
+    clone,
+    concatenate,
+    map_list,
+    noop,
+)
 from thinc.layers import chain, tuplify
diff --git a/thinc/tests/layers/test_feed_forward.py b/thinc/tests/layers/test_feed_forward.py
index b18a0fc0b..a808bb445 100644
--- a/thinc/tests/layers/test_feed_forward.py
+++ b/thinc/tests/layers/test_feed_forward.py
@@ -1,8 +1,10 @@
-import pytest
-import numpy
 from functools import partial
+
+import numpy
+import pytest
 from numpy.testing import assert_allclose
-from thinc.api import chain, Linear, Relu, NumpyOps
+
+from thinc.api import Linear, NumpyOps, Relu, chain
 
 
 @pytest.fixture(params=[1, 2, 9])
diff --git a/thinc/tests/layers/test_hash_embed.py b/thinc/tests/layers/test_hash_embed.py
index 8df50a03f..5b79539fa 100644
--- a/thinc/tests/layers/test_hash_embed.py
+++ b/thinc/tests/layers/test_hash_embed.py
@@ -1,4 +1,5 @@
 import numpy
+
 from thinc.api import HashEmbed
diff --git a/thinc/tests/layers/test_layers_api.py b/thinc/tests/layers/test_layers_api.py
index 761cad880..0ef559d96 100644
--- a/thinc/tests/layers/test_layers_api.py
+++ b/thinc/tests/layers/test_layers_api.py
@@ -1,14 +1,15 @@
 from typing import List, Optional
 
-from numpy.testing import assert_almost_equal
-from thinc.api import registry, with_padded, Dropout, NumpyOps, Model
-from thinc.backends import NumpyOps
-from thinc.util import data_validation, get_width
-from thinc.types import Ragged, Padded, Array2d, Floats2d, FloatsXd, Shape
-from thinc.compat import has_torch
 import numpy
 import pytest
 import srsly
+from numpy.testing import assert_almost_equal
+
+from thinc.api import Dropout, Model, NumpyOps, registry, with_padded
+from thinc.backends import NumpyOps
+from thinc.compat import has_torch
+from thinc.types import Array2d, Floats2d, FloatsXd, Padded, Ragged, Shape
+from thinc.util import data_validation, get_width
 
 OPS = NumpyOps()
diff --git a/thinc/tests/layers/test_linear.py b/thinc/tests/layers/test_linear.py
index 2362b556b..345669d87 100644
--- a/thinc/tests/layers/test_linear.py
+++ b/thinc/tests/layers/test_linear.py
@@ -1,9 +1,10 @@
+import numpy
 import pytest
-from mock import MagicMock
 from hypothesis import given, settings
-import numpy
+from mock import MagicMock
 from numpy.testing import assert_allclose
-from thinc.api import Linear, chain, Dropout, SGD
+
+from thinc.api import SGD, Dropout, Linear, chain
 
 from ..strategies import arrays_OI_O_BI
 from ..util import get_model, get_shape
diff --git a/thinc/tests/layers/test_lstm.py b/thinc/tests/layers/test_lstm.py
index 208ffb58b..44c90ed4c 100644
--- a/thinc/tests/layers/test_lstm.py
+++ b/thinc/tests/layers/test_lstm.py
@@ -1,10 +1,11 @@
-import numpy
 import timeit
-from thinc.api import NumpyOps, LSTM, PyTorchLSTM, with_padded, fix_random_seed
-from thinc.api import Ops
-from thinc.compat import has_torch
+
+import numpy
 import pytest
 
+from thinc.api import LSTM, NumpyOps, Ops, PyTorchLSTM, fix_random_seed, with_padded
+from thinc.compat import has_torch
+
 
 @pytest.fixture(params=[1, 6])
 def nI(request):
diff --git a/thinc/tests/layers/test_mappers.py b/thinc/tests/layers/test_mappers.py
index e890dd086..85e984bc4 100644
--- a/thinc/tests/layers/test_mappers.py
+++ b/thinc/tests/layers/test_mappers.py
@@ -1,5 +1,6 @@
-import pytest
 import numpy
+import pytest
+
 from thinc.layers import premap_ids, remap_ids, remap_ids_v2
diff --git a/thinc/tests/layers/test_mnist.py b/thinc/tests/layers/test_mnist.py
index 321de3a0f..060007cfd 100644
--- a/thinc/tests/layers/test_mnist.py
+++ b/thinc/tests/layers/test_mnist.py
@@ -1,8 +1,16 @@
 import pytest
-from thinc.api import Relu, Softmax, chain, clone, Adam
-from thinc.api import PyTorchWrapper, TensorFlowWrapper
-from thinc.api import get_current_ops
-from thinc.compat import has_torch, has_tensorflow
+
+from thinc.api import (
+    Adam,
+    PyTorchWrapper,
+    Relu,
+    Softmax,
+    TensorFlowWrapper,
+    chain,
+    clone,
+    get_current_ops,
+)
+from thinc.compat import has_tensorflow, has_torch
 
 
 @pytest.fixture(scope="module")
diff --git a/thinc/tests/layers/test_mxnet_wrapper.py b/thinc/tests/layers/test_mxnet_wrapper.py
index b954a8ec5..8ddf5dfce 100644
--- a/thinc/tests/layers/test_mxnet_wrapper.py
+++ b/thinc/tests/layers/test_mxnet_wrapper.py
@@ -2,10 +2,19 @@
 
 import numpy
 import pytest
-from thinc.api import Adam, ArgsKwargs, Model, Ops, MXNetWrapper
-from thinc.api import get_current_ops, mxnet2xp, xp2mxnet
-from thinc.types import Array2d, Array1d, IntsXd
+
+from thinc.api import (
+    Adam,
+    ArgsKwargs,
+    Model,
+    MXNetWrapper,
+    Ops,
+    get_current_ops,
+    mxnet2xp,
+    xp2mxnet,
+)
 from thinc.compat import has_cupy_gpu, has_mxnet
+from thinc.types import Array1d, Array2d, IntsXd
 from thinc.util import to_categorical
 
 from ..util import check_input_converters, make_tempdir
diff --git a/thinc/tests/layers/test_pytorch_wrapper.py b/thinc/tests/layers/test_pytorch_wrapper.py
index f4f83cb60..aa40d9044 100644
--- a/thinc/tests/layers/test_pytorch_wrapper.py
+++ b/thinc/tests/layers/test_pytorch_wrapper.py
@@ -1,20 +1,34 @@
-from thinc.api import Linear, SGD, PyTorchWrapper, PyTorchWrapper_v2, PyTorchWrapper_v3
-from thinc.api import xp2torch, torch2xp, ArgsKwargs, use_ops
-from thinc.api import chain, get_current_ops, Relu
-from thinc.api import CupyOps, MPSOps, NumpyOps
+import numpy
+import pytest
+
+from thinc.api import (
+    SGD,
+    ArgsKwargs,
+    CupyOps,
+    Linear,
+    MPSOps,
+    NumpyOps,
+    PyTorchWrapper,
+    PyTorchWrapper_v2,
+    PyTorchWrapper_v3,
+    Relu,
+    chain,
+    get_current_ops,
+    torch2xp,
+    use_ops,
+    xp2torch,
+)
 from thinc.backends import context_pools
+from thinc.compat import has_cupy_gpu, has_torch, has_torch_amp, has_torch_mps_gpu
 from thinc.layers.pytorchwrapper import PyTorchWrapper_v3
+from thinc.shims.pytorch import (
+    default_deserialize_torch_model,
+    default_serialize_torch_model,
+)
 from thinc.shims.pytorch_grad_scaler import PyTorchGradScaler
-from thinc.shims.pytorch import default_deserialize_torch_model
-from thinc.shims.pytorch import default_serialize_torch_model
-from thinc.compat import has_torch, has_torch_amp
-from thinc.compat import has_cupy_gpu, has_torch_mps_gpu
-import numpy
-import pytest
 from thinc.util import get_torch_default_device
 
-from ..util import make_tempdir, check_input_converters
-
+from ..util import check_input_converters, make_tempdir
 
 XP_OPS = [NumpyOps()]
 if has_cupy_gpu:
diff --git a/thinc/tests/layers/test_reduce.py b/thinc/tests/layers/test_reduce.py
index d26065c4a..608561e13 100644
--- a/thinc/tests/layers/test_reduce.py
+++ b/thinc/tests/layers/test_reduce.py
@@ -1,5 +1,6 @@
-import pytest
 import numpy
+import pytest
+
 from thinc.api import reduce_first, reduce_last, reduce_max, reduce_mean, reduce_sum
 from thinc.types import Ragged
diff --git a/thinc/tests/layers/test_resizable.py b/thinc/tests/layers/test_resizable.py
index dfb6c67fd..ffa256de5 100644
--- a/thinc/tests/layers/test_resizable.py
+++ b/thinc/tests/layers/test_resizable.py
@@ -1,7 +1,9 @@
-import pytest
 from functools import partial
-from thinc.api import resizable, Linear
-from thinc.layers.resizable import resize_model, resize_linear_weighted
+
+import pytest
+
+from thinc.api import Linear, resizable
+from thinc.layers.resizable import resize_linear_weighted, resize_model
 
 
 @pytest.fixture
diff --git a/thinc/tests/layers/test_shim.py b/thinc/tests/layers/test_shim.py
index bacde5cf6..dcb43ab1e 100644
--- a/thinc/tests/layers/test_shim.py
+++ b/thinc/tests/layers/test_shim.py
@@ -1,5 +1,7 @@
 from typing import List
+
 from thinc.shims.shim import Shim
+
 from ..util import make_tempdir
diff --git a/thinc/tests/layers/test_softmax.py b/thinc/tests/layers/test_softmax.py
index 69072b558..95e2f41c7 100644
--- a/thinc/tests/layers/test_softmax.py
+++ b/thinc/tests/layers/test_softmax.py
@@ -1,8 +1,8 @@
 from typing import Tuple, cast
 
 import numpy
-from numpy.testing import assert_allclose
 import pytest
+from numpy.testing import assert_allclose
 
 from thinc.api import Model, NumpyOps, Softmax_v2
 from thinc.types import Floats2d, Ints1d
diff --git a/thinc/tests/layers/test_sparse_linear.py b/thinc/tests/layers/test_sparse_linear.py
index 87c5a3a75..cce0d1023 100644
--- a/thinc/tests/layers/test_sparse_linear.py
+++ b/thinc/tests/layers/test_sparse_linear.py
@@ -1,7 +1,9 @@
 import math
+
 import numpy
 import pytest
-from thinc.api import SGD, to_categorical, SparseLinear, SparseLinear_v2
+
+from thinc.api import SGD, SparseLinear, SparseLinear_v2, to_categorical
 
 
 @pytest.fixture
diff --git a/thinc/tests/layers/test_tensorflow_wrapper.py b/thinc/tests/layers/test_tensorflow_wrapper.py
index c1b85da3b..4741f6dc3 100644
--- a/thinc/tests/layers/test_tensorflow_wrapper.py
+++ b/thinc/tests/layers/test_tensorflow_wrapper.py
@@ -1,9 +1,19 @@
 import numpy
 import pytest
-from thinc.api import Adam, ArgsKwargs, Linear, Model, TensorFlowWrapper
-from thinc.api import get_current_ops, keras_subclass, tensorflow2xp, xp2tensorflow
-from thinc.util import to_categorical
+
+from thinc.api import (
+    Adam,
+    ArgsKwargs,
+    Linear,
+    Model,
+    TensorFlowWrapper,
+    get_current_ops,
+    keras_subclass,
+    tensorflow2xp,
+    xp2tensorflow,
+)
 from thinc.compat import has_cupy_gpu, has_tensorflow
+from thinc.util import to_categorical
 
 from ..util import check_input_converters, make_tempdir
diff --git a/thinc/tests/layers/test_torchscriptwrapper.py b/thinc/tests/layers/test_torchscriptwrapper.py
index 37ff9ef08..b37afa3c3 100644
--- a/thinc/tests/layers/test_torchscriptwrapper.py
+++ b/thinc/tests/layers/test_torchscriptwrapper.py
@@ -1,8 +1,11 @@
-import pytest
 import numpy
+import pytest
 
-from thinc.api import PyTorchWrapper_v2, TorchScriptWrapper_v1
-from thinc.api import pytorch_to_torchscript_wrapper
+from thinc.api import (
+    PyTorchWrapper_v2,
+    TorchScriptWrapper_v1,
+    pytorch_to_torchscript_wrapper,
+)
 from thinc.compat import has_torch, torch
diff --git a/thinc/tests/layers/test_transforms.py b/thinc/tests/layers/test_transforms.py
index 8de5341d7..3a9a110f1 100644
--- a/thinc/tests/layers/test_transforms.py
+++ b/thinc/tests/layers/test_transforms.py
@@ -1,7 +1,8 @@
-from thinc.api import strings2arrays, NumpyOps, Ragged, registry
 import numpy
 import pytest
 
+from thinc.api import NumpyOps, Ragged, registry, strings2arrays
+
 from ..util import get_data_checker
diff --git a/thinc/tests/layers/test_uniqued.py b/thinc/tests/layers/test_uniqued.py
index 9cb207ca5..685da1deb 100644
--- a/thinc/tests/layers/test_uniqued.py
+++ b/thinc/tests/layers/test_uniqued.py
@@ -1,10 +1,11 @@
-import pytest
 import numpy
+import pytest
+from hypothesis import given, settings
+from hypothesis.strategies import composite, integers, lists
+from numpy.testing import assert_allclose
+
 from thinc.layers import Embed
 from thinc.layers.uniqued import uniqued
-from numpy.testing import assert_allclose
-from hypothesis import given, settings
-from hypothesis.strategies import integers, lists, composite
 
 ROWS = 10
diff --git a/thinc/tests/layers/test_with_debug.py b/thinc/tests/layers/test_with_debug.py
index 679c1f21e..3f65a3ac3 100644
--- a/thinc/tests/layers/test_with_debug.py
+++ b/thinc/tests/layers/test_with_debug.py
@@ -1,5 +1,6 @@
 from mock import MagicMock
-from thinc.api import with_debug, Linear
+
+from thinc.api import Linear, with_debug
 
 
 def test_with_debug():
diff --git a/thinc/tests/layers/test_with_flatten.py b/thinc/tests/layers/test_with_flatten.py
index 1ff622026..86d18eb67 100644
--- a/thinc/tests/layers/test_with_flatten.py
+++ b/thinc/tests/layers/test_with_flatten.py
@@ -1,4 +1,5 @@
 from typing import List
+
 from thinc.api import Model, with_flatten_v2
 
 INPUT = [[1, 2, 3], [4, 5], [], [6, 7, 8]]
diff --git a/thinc/tests/layers/test_with_transforms.py b/thinc/tests/layers/test_with_transforms.py
index c23db1463..82cdaed36 100644
--- a/thinc/tests/layers/test_with_transforms.py
+++ b/thinc/tests/layers/test_with_transforms.py
@@ -1,11 +1,20 @@
-import pytest
 import numpy
 import numpy.testing
-from thinc.api import NumpyOps, Model, Linear, noop
-from thinc.api import with_array2d, with_array, with_padded, with_list
-from thinc.api import with_ragged, with_getitem
-from thinc.types import Padded, Ragged
+import pytest
 
+from thinc.api import (
+    Linear,
+    Model,
+    NumpyOps,
+    noop,
+    with_array,
+    with_array2d,
+    with_getitem,
+    with_list,
+    with_padded,
+    with_ragged,
+)
+from thinc.types import Padded, Ragged
 
 from ..util import get_data_checker
diff --git a/thinc/tests/model/test_model.py b/thinc/tests/model/test_model.py
index 733b3329f..f93b46c8c 100644
--- a/thinc/tests/model/test_model.py
+++ b/thinc/tests/model/test_model.py
@@ -1,13 +1,28 @@
-from collections import Counter
-import pytest
 import threading
 import time
-from thinc.api import Adam, CupyOps, Dropout, Linear, Model, Relu
-from thinc.api import Shim, Softmax, chain, change_attr_values
-from thinc.api import concatenate, set_dropout_rate
-from thinc.api import use_ops, with_debug, wrap_model_recursive
-from thinc.compat import has_cupy_gpu
+from collections import Counter
+
 import numpy
+import pytest
+
+from thinc.api import (
+    Adam,
+    CupyOps,
+    Dropout,
+    Linear,
+    Model,
+    Relu,
+    Shim,
+    Softmax,
+    chain,
+    change_attr_values,
+    concatenate,
+    set_dropout_rate,
+    use_ops,
+    with_debug,
+    wrap_model_recursive,
+)
+from thinc.compat import has_cupy_gpu
 
 from ..util import make_tempdir
diff --git a/thinc/tests/model/test_validation.py b/thinc/tests/model/test_validation.py
index adecdd6d5..c58efd015 100644
--- a/thinc/tests/model/test_validation.py
+++ b/thinc/tests/model/test_validation.py
@@ -1,6 +1,15 @@
 import pytest
-from thinc.api import chain, Relu, reduce_max, Softmax, with_ragged
-from thinc.api import ParametricAttention, list2ragged, reduce_sum
+
+from thinc.api import (
+    ParametricAttention,
+    Relu,
+    Softmax,
+    chain,
+    list2ragged,
+    reduce_max,
+    reduce_sum,
+    with_ragged,
+)
 from thinc.util import DataValidationError, data_validation
diff --git a/thinc/tests/mypy/modules/fail_no_plugin.py b/thinc/tests/mypy/modules/fail_no_plugin.py
index 807fd672b..f53e33ef3 100644
--- a/thinc/tests/mypy/modules/fail_no_plugin.py
+++ b/thinc/tests/mypy/modules/fail_no_plugin.py
@@ -1,4 +1,4 @@
-from thinc.api import chain, Relu, reduce_max, Softmax, add
+from thinc.api import Relu, Softmax, add, chain, reduce_max
 
 bad_model = chain(Relu(10), reduce_max(), Softmax())
diff --git a/thinc/tests/mypy/modules/fail_plugin.py b/thinc/tests/mypy/modules/fail_plugin.py
index b14fcecf0..6f23c82b1 100644
--- a/thinc/tests/mypy/modules/fail_plugin.py
+++ b/thinc/tests/mypy/modules/fail_plugin.py
@@ -1,4 +1,4 @@
-from thinc.api import chain, Relu, reduce_max, Softmax, add, concatenate
+from thinc.api import Relu, Softmax, add, chain, concatenate, reduce_max
 
 bad_model = chain(Relu(10), reduce_max(), Softmax())
diff --git a/thinc/tests/mypy/modules/success_no_plugin.py b/thinc/tests/mypy/modules/success_no_plugin.py
index b17cff053..058573e5b 100644
--- a/thinc/tests/mypy/modules/success_no_plugin.py
+++ b/thinc/tests/mypy/modules/success_no_plugin.py
@@ -1,4 +1,4 @@
-from thinc.api import chain, Relu, reduce_max, Softmax, add
+from thinc.api import Relu, Softmax, add, chain, reduce_max
 
 good_model = chain(Relu(10), Relu(10), Softmax())
 reveal_type(good_model)
diff --git a/thinc/tests/mypy/modules/success_plugin.py b/thinc/tests/mypy/modules/success_plugin.py
index 85879a88a..3214bdcb7 100644
--- a/thinc/tests/mypy/modules/success_plugin.py
+++ b/thinc/tests/mypy/modules/success_plugin.py
@@ -1,6 +1,6 @@
 from typing import Any, TypeVar
 
-from thinc.api import chain, Relu, reduce_max, Softmax, add, Model
+from thinc.api import Model, Relu, Softmax, add, chain, reduce_max
 
 good_model = chain(Relu(10), Relu(10), Softmax())
 reveal_type(good_model)
diff --git a/thinc/tests/mypy/test_mypy.py b/thinc/tests/mypy/test_mypy.py
index 2f2976882..f144128f4 100644
--- a/thinc/tests/mypy/test_mypy.py
+++ b/thinc/tests/mypy/test_mypy.py
@@ -1,8 +1,8 @@
 import os
 import re
-from pathlib import Path
 import shutil
 import sys
+from pathlib import Path
 
 import pytest
diff --git a/thinc/tests/regression/issue519/program.py b/thinc/tests/regression/issue519/program.py
index b3e6dc9ba..bce5f3234 100644
--- a/thinc/tests/regression/issue519/program.py
+++ b/thinc/tests/regression/issue519/program.py
@@ -1,4 +1,4 @@
-from thinc.api import chain, concatenate, Relu, Softmax
+from thinc.api import Relu, Softmax, chain, concatenate
 from thinc.model import Model
 from thinc.types import Floats2d
diff --git a/thinc/tests/regression/test_issue208.py b/thinc/tests/regression/test_issue208.py
index 25d7280f1..0c574d6d1 100644
--- a/thinc/tests/regression/test_issue208.py
+++ b/thinc/tests/regression/test_issue208.py
@@ -1,4 +1,4 @@
-from thinc.api import chain, Linear
+from thinc.api import Linear, chain
 
 
 def test_issue208():
diff --git a/thinc/tests/shims/test_pytorch_grad_scaler.py b/thinc/tests/shims/test_pytorch_grad_scaler.py
index 2ab0fa738..d4ac10fec 100644
--- a/thinc/tests/shims/test_pytorch_grad_scaler.py
+++ b/thinc/tests/shims/test_pytorch_grad_scaler.py
@@ -1,10 +1,10 @@
 import pytest
-
 from hypothesis import given, settings
 from hypothesis.strategies import lists, one_of, tuples
+
+from thinc.api import PyTorchGradScaler
 from thinc.compat import has_torch, has_torch_amp, has_torch_cuda_gpu, torch
 from thinc.util import is_torch_array
-from thinc.api import PyTorchGradScaler
 
 from ..strategies import ndarrays
diff --git a/thinc/tests/strategies.py b/thinc/tests/strategies.py
index 322728cd9..bc12975aa 100644
--- a/thinc/tests/strategies.py
+++ b/thinc/tests/strategies.py
@@ -1,7 +1,8 @@
 import numpy
-from hypothesis.strategies import just, tuples, integers, floats
 from hypothesis.extra.numpy import arrays
-from thinc.api import NumpyOps, Linear
+from hypothesis.strategies import floats, integers, just, tuples
+
+from thinc.api import Linear, NumpyOps
 
 
 def get_ops():
diff --git a/thinc/tests/test_config.py b/thinc/tests/test_config.py
index 0dceadfc4..fe2118e25 100644
--- a/thinc/tests/test_config.py
+++ b/thinc/tests/test_config.py
@@ -1,20 +1,21 @@
-import pytest
-from typing import Iterable, Union, Optional, List, Callable, Dict, Any
+import inspect
+import pickle
 from types import GeneratorType
-from pydantic import BaseModel, StrictBool, StrictFloat, PositiveInt, constr
+from typing import Any, Callable, Dict, Iterable, List, Optional, Union
+
 import catalogue
+import numpy
+import pytest
+from pydantic import BaseModel, PositiveInt, StrictBool, StrictFloat, constr
+
 import thinc.config
+from thinc.api import Config, Model, NumpyOps, RAdam
 from thinc.config import ConfigValidationError
 from thinc.types import Generator, Ragged
-from thinc.api import Config, RAdam, Model, NumpyOps
 from thinc.util import partial
-import numpy
-import inspect
-import pickle
 
 from .util import make_tempdir
-
 
 EXAMPLE_CONFIG = """
 [optimizer]
@optimizers = "Adam.v1"
diff --git a/thinc/tests/test_import__all__.py b/thinc/tests/test_import__all__.py
index 226783ec2..fb0a08a20 100644
--- a/thinc/tests/test_import__all__.py
+++ b/thinc/tests/test_import__all__.py
@@ -1,9 +1,9 @@
 import ast
+import importlib
 from collections import namedtuple
-from typing import Tuple, List
+from typing import List, Tuple
 
 import pytest
-import importlib
 
 _Import = namedtuple("_Import", ["module", "name", "alias"])
diff --git a/thinc/tests/test_indexing.py b/thinc/tests/test_indexing.py
index 98fbc4437..2703e5dfa 100644
--- a/thinc/tests/test_indexing.py
+++ b/thinc/tests/test_indexing.py
@@ -1,7 +1,8 @@
-import pytest
 import numpy
+import pytest
 from numpy.testing import assert_allclose
-from thinc.types import Ragged, Pairs
+
+from thinc.types import Pairs, Ragged
 
 
 @pytest.fixture
diff --git a/thinc/tests/test_initializers.py b/thinc/tests/test_initializers.py
index 4f7c8f2cc..628398be0 100644
--- a/thinc/tests/test_initializers.py
+++ b/thinc/tests/test_initializers.py
@@ -1,8 +1,14 @@
+import numpy
 import pytest
-from thinc.api import glorot_uniform_init, zero_init, uniform_init, normal_init
-from thinc.api import NumpyOps
+
 from thinc import registry
-import numpy
+from thinc.api import (
+    NumpyOps,
+    glorot_uniform_init,
+    normal_init,
+    uniform_init,
+    zero_init,
+)
 
 
 @pytest.mark.parametrize(
diff --git a/thinc/tests/test_loss.py b/thinc/tests/test_loss.py
index 75206d240..fc100dd3a 100644
--- a/thinc/tests/test_loss.py
+++ b/thinc/tests/test_loss.py
@@ -1,8 +1,13 @@
-import pytest
 import numpy
-from thinc.api import CategoricalCrossentropy, SequenceCategoricalCrossentropy
-from thinc.api import L2Distance, CosineDistance
+import pytest
+
 from thinc import registry
+from thinc.api import (
+    CategoricalCrossentropy,
+    CosineDistance,
+    L2Distance,
+    SequenceCategoricalCrossentropy,
+)
 
 # some simple arrays
 scores0 = numpy.zeros((3, 3), dtype="f")
diff --git a/thinc/tests/test_optimizers.py b/thinc/tests/test_optimizers.py
index a31dbce32..4e336640b 100644
--- a/thinc/tests/test_optimizers.py
+++ b/thinc/tests/test_optimizers.py
@@ -1,6 +1,7 @@
-import pytest
-from thinc.api import registry, Optimizer
 import numpy
+import pytest
+
+from thinc.api import Optimizer, registry
 
 
 def _test_schedule_valid():
diff --git a/thinc/tests/test_schedules.py b/thinc/tests/test_schedules.py
index d975d2dbd..31a8f4e3b 100644
--- a/thinc/tests/test_schedules.py
+++ b/thinc/tests/test_schedules.py
@@ -1,5 +1,12 @@
-from thinc.api import decaying, compounding, slanted_triangular, constant_then
-from thinc.api import constant, warmup_linear, cyclic_triangular
+from thinc.api import (
+    compounding,
+    constant,
+    constant_then,
+    cyclic_triangular,
+    decaying,
+    slanted_triangular,
+    warmup_linear,
+)
 
 
 def test_decaying_rate():
diff --git a/thinc/tests/test_serialize.py b/thinc/tests/test_serialize.py
index b89fc2d94..a457cd237 100644
--- a/thinc/tests/test_serialize.py
+++ b/thinc/tests/test_serialize.py
@@ -1,7 +1,16 @@
 import pytest
 import srsly
-from thinc.api import with_array, Linear, Maxout, chain, Model, Shim
-from thinc.api import serialize_attr, deserialize_attr
+
+from thinc.api import (
+    Linear,
+    Maxout,
+    Model,
+    Shim,
+    chain,
+    deserialize_attr,
+    serialize_attr,
+    with_array,
+)
 
 
 @pytest.fixture
diff --git a/thinc/tests/test_types.py b/thinc/tests/test_types.py
index 249ce2b80..ebfbb6fb6 100644
--- a/thinc/tests/test_types.py
+++ b/thinc/tests/test_types.py
@@ -1,8 +1,17 @@
 import numpy
-from pydantic import create_model, ValidationError
-from thinc.types import Floats1d, Floats2d, Floats3d, Floats4d
-from thinc.types import Ints1d, Ints2d, Ints3d, Ints4d
 import pytest
+from pydantic import ValidationError, create_model
+
+from thinc.types import (
+    Floats1d,
+    Floats2d,
+    Floats3d,
+    Floats4d,
+    Ints1d,
+    Ints2d,
+    Ints3d,
+    Ints4d,
+)
 
 
 @pytest.mark.parametrize(
diff --git a/thinc/tests/test_util.py b/thinc/tests/test_util.py
index 8d2d0058d..77f6a7b86 100644
--- a/thinc/tests/test_util.py
+++ b/thinc/tests/test_util.py
@@ -1,11 +1,16 @@
-import pytest
 import numpy
+import pytest
 from hypothesis import given
-from thinc.api import get_width, Ragged, Padded
-from thinc.util import get_array_module, is_numpy_array, to_categorical
-from thinc.util import is_cupy_array
-from thinc.util import convert_recursive
+
+from thinc.api import Padded, Ragged, get_width
 from thinc.types import ArgsKwargs
+from thinc.util import (
+    convert_recursive,
+    get_array_module,
+    is_cupy_array,
+    is_numpy_array,
+    to_categorical,
+)
 
 from . import strategies
diff --git a/thinc/tests/util.py b/thinc/tests/util.py
index 7440a4b6e..defb9a2f6 100644
--- a/thinc/tests/util.py
+++ b/thinc/tests/util.py
@@ -1,10 +1,12 @@
 import contextlib
-from pathlib import Path
-import tempfile
 import shutil
-from thinc.api import Linear, Ragged, Padded, ArgsKwargs
+import tempfile
+from pathlib import Path
+
 import numpy
 import pytest
+
+from thinc.api import ArgsKwargs, Linear, Padded, Ragged
 from thinc.util import has_cupy, is_cupy_array, is_numpy_array
diff --git a/thinc/types.py b/thinc/types.py
index c7e6a00f6..9a9487cb4 100644
--- a/thinc/types.py
+++ b/thinc/types.py
@@ -1,11 +1,28 @@
-from typing import Union, Tuple, Sized, Container, Any, TypeVar, Callable
-from typing import Iterable, Iterator, Sequence, Dict, Generic, cast
-from typing import Optional, List, overload
+import sys
 from abc import abstractmethod
 from dataclasses import dataclass
+from typing import (
+    Any,
+    Callable,
+    Container,
+    Dict,
+    Generic,
+    Iterable,
+    Iterator,
+    List,
+    Optional,
+    Sequence,
+    Sized,
+    Tuple,
+    TypeVar,
+    Union,
+    cast,
+    overload,
+)
+
 import numpy
-import sys
-from .compat import has_cupy, cupy
+
+from .compat import cupy, has_cupy
 
 if has_cupy:
     get_array_module = cupy.get_array_module
@@ -14,9 +31,9 @@
 
 # Use typing_extensions for Python versions < 3.8
 if sys.version_info < (3, 8):
-    from typing_extensions import Protocol, Literal
+    from typing_extensions import Literal, Protocol
 else:
-    from typing import Protocol, Literal  # noqa: F401
+    from typing import Literal, Protocol  # noqa: F401
 
 
 # fmt: off
diff --git a/thinc/util.py b/thinc/util.py
index aabab9ecb..9a1aaf65b 100644
--- a/thinc/util.py
+++ b/thinc/util.py
@@ -1,30 +1,55 @@
-from typing import Any, Union, Sequence, cast, Dict, Optional, Callable, TypeVar
-from typing import List, Mapping, Tuple
-import numpy
-import platform
-from packaging.version import Version
-import random
+import contextlib
 import functools
-from wasabi import table
-from pydantic import create_model, ValidationError
 import inspect
 import os
+import platform
+import random
 import tempfile
 import threading
-import contextlib
 from contextvars import ContextVar
 from dataclasses import dataclass
-from .compat import has_cupy, has_mxnet, has_torch, has_tensorflow
-from .compat import has_cupy_gpu, has_torch_cuda_gpu, has_gpu
-from .compat import has_torch_mps
-from .compat import torch, cupy, tensorflow as tf, mxnet as mx, cupy_from_dlpack
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    List,
+    Mapping,
+    Optional,
+    Sequence,
+    Tuple,
+    TypeVar,
+    Union,
+    cast,
+)
+
+import numpy
+from packaging.version import Version
+from pydantic import ValidationError, create_model
+from wasabi import table
+
+from .compat import (
+    cupy,
+    cupy_from_dlpack,
+    has_cupy,
+    has_cupy_gpu,
+    has_gpu,
+    has_mxnet,
+    has_tensorflow,
+    has_torch,
+    has_torch_cuda_gpu,
+    has_torch_mps,
+)
+from .compat import mxnet as mx
+from .compat import tensorflow as tf
+from .compat import torch
 
 DATA_VALIDATION: ContextVar[bool] = ContextVar("DATA_VALIDATION", default=False)
 
-from .types import ArrayXd, ArgsKwargs, Ragged, Padded, FloatsXd, IntsXd  # noqa: E402
-from . import types  # noqa: E402
 from typing import TYPE_CHECKING
 
+from . import types  # noqa: E402
+from .types import ArgsKwargs, ArrayXd, FloatsXd, IntsXd, Padded, Ragged  # noqa: E402
+
 if TYPE_CHECKING:
     from .api import Ops
@@ -174,7 +199,7 @@ def set_active_gpu(gpu_id: int) -> "cupy.cuda.Device":  # pragma: no cover
 
 
 def require_cpu() -> bool:  # pragma: no cover
     """Use CPU through best available backend."""
-    from .backends import set_current_ops, get_ops
+    from .backends import get_ops, set_current_ops
 
     ops = get_ops("cpu")
     set_current_ops(ops)
@@ -190,7 +215,7 @@ def prefer_gpu(gpu_id: int = 0) -> bool:  # pragma: no cover
 
 
 def require_gpu(gpu_id: int = 0) -> bool:  # pragma: no cover
-    from .backends import set_current_ops, CupyOps, MPSOps
+    from .backends import CupyOps, MPSOps, set_current_ops
 
     if platform.system() == "Darwin" and not has_torch_mps:
         if has_torch:
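
Note: every hunk in this diff is a mechanical import re-ordering; no statements are added or removed, so runtime behaviour should be unchanged. As a rough sketch of how a pass like this can be regenerated and verified locally — assuming isort>=5 and black are installed, and using the repository root as an illustrative target — the following commands should suffice; isort reads profile = "black" from the [tool.isort] section of pyproject.toml, so no extra flags are needed:

    # Re-apply the sorting in place:
    $ python -m isort thinc
    # Verify without modifying files (e.g. as a CI step); --diff prints the
    # hunks that would be applied, --check-only sets a non-zero exit code:
    $ python -m isort --check-only --diff thinc
    $ python -m black --check thinc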