Fixes for slow tests #671

Merged 4 commits on May 24, 2022
thinc/compat.py (2 changes: 1 addition & 1 deletion)
@@ -38,7 +38,7 @@
         and not torch.cuda.amp.common.amp_definitely_not_available()
     )
 except ImportError:  # pragma: no cover
-    torch = None
+    torch = None  # type: ignore
     has_torch = False
     has_torch_gpu = False
     has_torch_amp = False
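Note (not part of the diff): a minimal sketch of the optional-import pattern this file uses, showing why the added ignore comment is needed. Once `import torch` succeeds, mypy infers the name `torch` as a module type, so rebinding it to `None` on the ImportError path is reported as an incompatible assignment unless it is ignored (or typed as an Optional module).

try:
    import torch

    has_torch = True
except ImportError:  # pragma: no cover
    # Without the ignore, mypy flags assigning None to a name it inferred as a module.
    torch = None  # type: ignore
    has_torch = False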
thinc/tests/layers/test_basic_tagger.py (3 changes: 2 additions & 1 deletion)
@@ -8,13 +8,14 @@
 def ancora():
     pytest.importorskip("ml_datasets")
     import ml_datasets
+
     return ml_datasets.ud_ancora_pos_tags()


 def create_embed_relu_relu_softmax(depth, width, vector_length):
     with Model.define_operators({">>": chain}):
         model = strings2arrays() >> with_array(
-            HashEmbed(width, vector_length)
+            HashEmbed(width, vector_length, column=0)
             >> expand_window(window_size=1)
             >> Relu(width, width * 3)
             >> Relu(width, width)
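Note (not part of the diff): a small sketch, with made-up sizes, of what the added `column=0` argument does. Per the Thinc docs, `column` tells `HashEmbed` which column of a 2d integer array to use as keys, so the layer can sit directly behind `strings2arrays` and `with_array` without a separate column-extraction step.

import numpy
from thinc.api import HashEmbed

embed = HashEmbed(nO=8, nV=1000, column=0)   # hypothetical output width and table size
embed.initialize()
keys = numpy.zeros((4, 1), dtype="uint64")   # hypothetical (n_tokens, n_columns) key array
vectors = embed.predict(keys)                # expected Floats2d of shape (4, 8)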
thinc/tests/layers/test_mnist.py (8 changes: 8 additions & 0 deletions)
@@ -1,6 +1,7 @@
 import pytest
 from thinc.api import Relu, Softmax, chain, clone, Adam
 from thinc.api import PyTorchWrapper, TensorFlowWrapper
+from thinc.api import get_current_ops
 from thinc.compat import has_torch, has_tensorflow

@@ -80,16 +81,23 @@ def test_small_end_to_end(width, nb_epoch, min_score, create_model, mnist):
     optimizer = Adam(0.001)
     losses = []
     scores = []
+    ops = get_current_ops()
+
     for i in range(nb_epoch):
         for X, Y in model.ops.multibatch(batch_size, train_X, train_Y, shuffle=True):
             Yh, backprop = model.begin_update(X)
+            # Ensure that the tensor is type-compatible with the current backend.
+            Yh = ops.asarray(Yh)
+
             backprop(Yh - Y)
             model.finish_update(optimizer)
             losses.append(((Yh - Y) ** 2).sum())
         correct = 0
         total = 0
         for X, Y in model.ops.multibatch(batch_size, dev_X, dev_Y):
             Yh = model.predict(X)
+            Yh = ops.asarray(Yh)
+
             correct += (Yh.argmax(axis=1) == Y.argmax(axis=1)).sum()
             total += Yh.shape[0]
         score = correct / total
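Note (not part of the diff): a minimal sketch of the `get_current_ops()` / `asarray` pattern added above, with made-up data. When the model under test is built with `PyTorchWrapper` or `TensorFlowWrapper`, its outputs may not match the current backend's array type (the diff's own comment mentions type compatibility); `ops.asarray` normalizes them so the subtraction and `argmax` against the plain numpy labels behave consistently.

from thinc.api import get_current_ops

ops = get_current_ops()
Yh = ops.asarray([[0.1, 0.9], [0.8, 0.2]])  # stand-in for a wrapped model's output
print(type(Yh))  # numpy.ndarray on CPU ops, cupy.ndarray on GPU ops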
thinc/tests/regression/issue519/program.py (12 changes: 4 additions & 8 deletions)
@@ -5,16 +5,12 @@
 n_hidden = 32
 dropout = 0.2

-model1 = chain(
-    Relu(nO=n_hidden, dropout=dropout),
-    Relu(nO=n_hidden, dropout=dropout),
-    Softmax()
+model1: Model[Floats2d, Floats2d] = chain(
+    Relu(nO=n_hidden, dropout=dropout), Relu(nO=n_hidden, dropout=dropout), Softmax()
 )

-model2 = chain(
-    Relu(nO=n_hidden, dropout=dropout),
-    Relu(nO=n_hidden, dropout=dropout),
-    Softmax()
+model2: Model[Floats2d, Floats2d] = chain(
+    Relu(nO=n_hidden, dropout=dropout), Relu(nO=n_hidden, dropout=dropout), Softmax()
 )

 model3: Model[Floats2d, Floats2d] = concatenate(*[model1, model2])
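Note (not part of the diff): the hunk starts at line 5, so the program's imports are not shown; the sketch below assumes they come from `thinc.api` and `thinc.types`, and uses hypothetical names `model_a`/`model_b`. Annotating the chained models as `Model[Floats2d, Floats2d]` gives mypy concrete input/output types to check when the models are later combined with `concatenate`.

from thinc.api import Model, Relu, Softmax, chain, concatenate
from thinc.types import Floats2d

model_a: Model[Floats2d, Floats2d] = chain(Relu(nO=32, dropout=0.2), Softmax())
model_b: Model[Floats2d, Floats2d] = chain(Relu(nO=32, dropout=0.2), Softmax())
combined: Model[Floats2d, Floats2d] = concatenate(model_a, model_b)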
thinc/tests/regression/issue519/test_issue519.py (8 changes: 6 additions & 2 deletions)
@@ -1,4 +1,6 @@
 import subprocess
+import sys
+
 try:
     import importlib.resources as importlib_resources
 except ImportError:
@@ -16,10 +18,12 @@ def test_issue519():
     This test can take up to 45 seconds, and is thus marked as slow.
     """
     # Determine the name of the parent module (which contains the test program)
-    parent_module_name = __name__[:__name__.rfind(".")]
+    parent_module_name = __name__[: __name__.rfind(".")]

     # Load test program that calls a Thinc API with variadic arguments
     program_text = importlib_resources.read_text(parent_module_name, "program.py")

     # Ask Mypy to type-check the loaded program text
-    subprocess.run(["mypy", "--command", program_text], check=True)
+    subprocess.run(
+        [sys.executable, "-m", "mypy", "--command", program_text], check=True
+    )
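Note (not part of the diff): a minimal sketch of why the call goes through `sys.executable -m mypy` rather than a bare `mypy` command: it runs mypy under the same interpreter (and virtual environment) that is executing the test, so the test does not depend on a `mypy` console script being on PATH.

import subprocess
import sys

# Runs mypy from the current interpreter's environment; --version is just a
# cheap stand-in for the real "--command <program text>" invocation.
subprocess.run([sys.executable, "-m", "mypy", "--version"], check=True)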