✅ Reduce run time for MapDe test (#627)
shaneahmed committed Jul 1, 2023
1 parent e4deac4 · commit d0d4ed6
Showing 1 changed file with 8 additions and 9 deletions.
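In brief: the test previously downloaded and ran inference with two sets of pretrained weights (mapde-crchisto and mapde-conic); after this commit it exercises only mapde-conic and moves the model to the GPU when one is available. Short sketches of the resulting patterns follow each hunk below.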
17 changes: 8 additions & 9 deletions tests/models/test_arch_mapde.py
@@ -2,17 +2,20 @@
 import numpy as np
 import torch
 
-from tiatoolbox import utils
 from tiatoolbox.models import MapDe
 from tiatoolbox.models.architecture import fetch_pretrained_weights
+from tiatoolbox.utils import env_detection as toolbox_env
+from tiatoolbox.utils.misc import select_device
 from tiatoolbox.wsicore.wsireader import WSIReader
 
+ON_GPU = toolbox_env.has_gpu()
+
 
 def _load_mapde(tmp_path, name):
     """Loads MapDe model with specified weights."""
     model = MapDe()
     fetch_pretrained_weights(name, f"{tmp_path}/weights.pth")
-    map_location = utils.misc.select_device(utils.env_detection.has_gpu())
+    map_location = select_device(ON_GPU)
     pretrained = torch.load(f"{tmp_path}/weights.pth", map_location=map_location)
     model.load_state_dict(pretrained)
 
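The first hunk swaps the fully qualified utils calls for direct imports and hoists GPU detection into a module-level ON_GPU flag, so the device is resolved once per test module. A minimal sketch of what the two helpers resolve to, assuming has_gpu wraps torch.cuda.is_available and select_device returns the torch device string "cuda" or "cpu" (assumed behaviour of the tiatoolbox helpers, not confirmed by this diff):

import torch

def has_gpu() -> bool:
    # Assumed behaviour of tiatoolbox.utils.env_detection.has_gpu.
    return torch.cuda.is_available()

def select_device(on_gpu: bool) -> str:
    # Assumed behaviour of tiatoolbox.utils.misc.select_device.
    return "cuda" if on_gpu else "cpu"

# Computed once at import time; every test in the module reuses it.
ON_GPU = has_gpu()

Passing the resulting string to torch.load(map_location=...) loads the checkpoint tensors straight onto the target device instead of staging them on the CPU first.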
@@ -34,14 +37,10 @@ def test_functionality(remote_sample, tmp_path):
         (0, 0, 252, 252), resolution=0.50, units="mpp", coord_space="resolution"
     )
 
-    model = _load_mapde(tmp_path=tmp_path, name="mapde-crchisto")
+    model = _load_mapde(tmp_path=tmp_path, name="mapde-conic")
     patch = model.preproc(patch)
     batch = torch.from_numpy(patch)[None]
-    output = model.infer_batch(model, batch, on_gpu=False)
-    output = model.postproc(output[0])
-    assert np.all(output[0:2] == [[99, 178], [64, 218]])
-
-    model = _load_mapde(tmp_path=tmp_path, name="mapde-conic")
-    output = model.infer_batch(model, batch, on_gpu=False)
+    model = model.to(select_device(ON_GPU))
+    output = model.infer_batch(model, batch, on_gpu=ON_GPU)
     output = model.postproc(output[0])
     assert np.all(output[0:2] == [[19, 171], [53, 89]])
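The second hunk collapses two load-infer-assert cycles into one and places both the model and the inference on the selected device. An annotated restatement of the resulting test body, reusing the names from the diff (MapDe.infer_batch takes the model as its first argument, as shown above):

model = _load_mapde(tmp_path=tmp_path, name="mapde-conic")
patch = model.preproc(patch)                    # model-specific preprocessing
batch = torch.from_numpy(patch)[None]           # add a leading batch dimension
model = model.to(select_device(ON_GPU))         # move weights to cuda or cpu
output = model.infer_batch(model, batch, on_gpu=ON_GPU)
output = model.postproc(output[0])              # detected coordinates, per the assertion
assert np.all(output[0:2] == [[19, 171], [53, 89]])

Dropping the mapde-crchisto pass removes one weight download and one full inference run, which is where the run-time saving comes from.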
