
Commit 06dfa5e
use gt rather than ge
leej3 committed May 29, 2024 · 1 parent 741dbbe
Showing 6 changed files with 15 additions and 15 deletions.
4 changes: 2 additions & 2 deletions ignite/distributed/comp_models/base.py
@@ -5,7 +5,7 @@
import torch
from packaging.version import Version

-_torch_version_ge_112 = Version(torch.__version__) > Version("1.12.0")
+_torch_version_gt_112 = Version(torch.__version__) > Version("1.12.0")


class ComputationModel(metaclass=ABCMeta):
@@ -329,7 +329,7 @@ def get_node_rank(self) -> int:
    def device(self) -> torch.device:
        if torch.cuda.is_available():
            return torch.device("cuda")
-        if _torch_version_ge_112 and torch.backends.mps.is_available():
+        if _torch_version_gt_112 and torch.backends.mps.is_available():
            return torch.device("mps")
        return torch.device("cpu")

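Why gt rather than ge: the flag is computed with a strict greater-than, so on PyTorch 1.12.0 exactly it is False, and the old _ge_ name misdescribed it. A minimal standalone sketch of the comparison semantics (illustration only, not part of the commit):

from packaging.version import Version

# Strict greater-than: False on exactly 1.12.0, True from the next release on.
print(Version("1.12.0") > Version("1.12.0"))   # False
print(Version("1.12.1") > Version("1.12.0"))   # True

# The old name implied >=, which differs from > precisely on 1.12.0.
print(Version("1.12.0") >= Version("1.12.0"))  # True

Putting the version flag first in `_torch_version_gt_112 and torch.backends.mps.is_available()` also short-circuits the check, so `torch.backends.mps` is never touched on older PyTorch builds where that attribute may not exist.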
4 changes: 2 additions & 2 deletions tests/ignite/distributed/comp_models/test_base.py
@@ -1,7 +1,7 @@
import pytest
import torch

-from ignite.distributed.comp_models.base import _SerialModel, _torch_version_ge_112, ComputationModel
+from ignite.distributed.comp_models.base import _SerialModel, _torch_version_gt_112, ComputationModel


def test_serial_model():
@@ -16,7 +16,7 @@ def test_serial_model():
    assert model.get_node_rank() == 0
    if torch.cuda.is_available():
        assert model.device().type == "cuda"
-    elif _torch_version_ge_112 and torch.backends.mps.is_available():
+    elif _torch_version_gt_112 and torch.backends.mps.is_available():
        assert model.device().type == "mps"
    else:
        assert model.device().type == "cpu"
4 changes: 2 additions & 2 deletions tests/ignite/distributed/test_auto.py
@@ -12,7 +12,7 @@

import ignite.distributed as idist
from ignite.distributed.auto import auto_dataloader, auto_model, auto_optim, DistributedProxySampler
-from ignite.distributed.comp_models.base import _torch_version_ge_112
+from ignite.distributed.comp_models.base import _torch_version_gt_112
from tests.ignite import is_mps_available_and_functional


@@ -182,7 +182,7 @@ def _test_auto_model_optimizer(ws, device):


@pytest.mark.skipif(
-    (not _torch_version_ge_112) or (torch.backends.mps.is_available() and not is_mps_available_and_functional()),
+    (not _torch_version_gt_112) or (torch.backends.mps.is_available() and not is_mps_available_and_functional()),
    reason="Skip if MPS not functional",
)
def test_auto_methods_no_dist():
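The skipif condition above reads: skip when PyTorch is not strictly newer than 1.12.0, or when torch advertises MPS but the backend is not actually usable on this machine. A sketch of the same predicate as a standalone helper (the helper name is hypothetical, not from the repository):

import torch

from ignite.distributed.comp_models.base import _torch_version_gt_112
from tests.ignite import is_mps_available_and_functional


def _should_skip_mps_test() -> bool:  # hypothetical helper mirroring the decorator
    # Too old for a usable MPS path: nothing to test.
    if not _torch_version_gt_112:
        return True
    # MPS advertised but broken on this machine: skip as well.
    return torch.backends.mps.is_available() and not is_mps_available_and_functional()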
4 changes: 2 additions & 2 deletions tests/ignite/distributed/test_launcher.py
@@ -8,7 +8,7 @@
from packaging.version import Version

import ignite.distributed as idist
-from ignite.distributed.comp_models.base import _torch_version_ge_112
+from ignite.distributed.comp_models.base import _torch_version_gt_112
from ignite.distributed.utils import has_hvd_support, has_native_dist_support, has_xla_support
from tests.ignite import is_mps_available_and_functional

@@ -57,7 +57,7 @@ def execute(cmd, env=None):


@pytest.mark.skipif(
-    (not _torch_version_ge_112) or (torch.backends.mps.is_available() and not is_mps_available_and_functional()),
+    (not _torch_version_gt_112) or (torch.backends.mps.is_available() and not is_mps_available_and_functional()),
    reason="Skip if MPS not functional",
)
def test_check_idist_parallel_no_dist(exec_filepath):
6 changes: 3 additions & 3 deletions tests/ignite/distributed/utils/test_serial.py
@@ -1,7 +1,7 @@
import torch

import ignite.distributed as idist
-from ignite.distributed.comp_models.base import _torch_version_ge_112
+from ignite.distributed.comp_models.base import _torch_version_gt_112
from tests.ignite.distributed.utils import (
    _sanity_check,
    _test_distrib__get_max_length,
@@ -18,7 +18,7 @@ def test_no_distrib(capsys):
    assert idist.backend() is None
    if torch.cuda.is_available():
        assert idist.device().type == "cuda"
-    elif _torch_version_ge_112 and torch.backends.mps.is_available():
+    elif _torch_version_gt_112 and torch.backends.mps.is_available():
        assert idist.device().type == "mps"
    else:
        assert idist.device().type == "cpu"
@@ -41,7 +41,7 @@ def test_no_distrib(capsys):
    assert "ignite.distributed.utils INFO: backend: None" in out[-1]
    if torch.cuda.is_available():
        assert "ignite.distributed.utils INFO: device: cuda" in out[-1]
-    elif _torch_version_ge_112 and torch.backends.mps.is_available():
+    elif _torch_version_gt_112 and torch.backends.mps.is_available():
        assert "ignite.distributed.utils INFO: device: mps" in out[-1]
    else:
        assert "ignite.distributed.utils INFO: device: cpu" in out[-1]
8 changes: 4 additions & 4 deletions tests/ignite/engine/test_create_supervised.py
@@ -12,7 +12,7 @@
from torch.optim import SGD

import ignite.distributed as idist
-from ignite.distributed.comp_models.base import _torch_version_ge_112
+from ignite.distributed.comp_models.base import _torch_version_gt_112
from ignite.engine import (
    _check_arg,
    create_supervised_evaluator,
@@ -487,7 +487,7 @@ def test_create_supervised_trainer_on_cuda():
    _test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device)


-@pytest.mark.skipif(not (_torch_version_ge_112 and is_mps_available_and_functional()), reason="Skip if no MPS")
+@pytest.mark.skipif(not (_torch_version_gt_112 and is_mps_available_and_functional()), reason="Skip if no MPS")
def test_create_supervised_trainer_on_mps():
    model_device = trainer_device = "mps"
    _test_create_supervised_trainer_wrong_accumulation(model_device=model_device, trainer_device=trainer_device)
@@ -668,14 +668,14 @@ def test_create_supervised_evaluator_on_cuda_with_model_on_cpu():
    _test_mocked_supervised_evaluator(evaluator_device="cuda")


-@pytest.mark.skipif(not (_torch_version_ge_112 and is_mps_available_and_functional()), reason="Skip if no MPS")
+@pytest.mark.skipif(not (_torch_version_gt_112 and is_mps_available_and_functional()), reason="Skip if no MPS")
def test_create_supervised_evaluator_on_mps():
    model_device = evaluator_device = "mps"
    _test_create_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device)
    _test_mocked_supervised_evaluator(model_device=model_device, evaluator_device=evaluator_device)


-@pytest.mark.skipif(not (_torch_version_ge_112 and is_mps_available_and_functional()), reason="Skip if no MPS")
+@pytest.mark.skipif(not (_torch_version_gt_112 and is_mps_available_and_functional()), reason="Skip if no MPS")
def test_create_supervised_evaluator_on_mps_with_model_on_cpu():
    _test_create_supervised_evaluator(evaluator_device="mps")
    _test_mocked_supervised_evaluator(evaluator_device="mps")
