Fix amp autocast #6080

Merged: 11 commits, merged on Feb 19, 2021. The diff below shows changes from 1 commit.
pytorch_lightning/overrides/base.py (2 additions, 1 deletion)
@@ -19,12 +19,13 @@

 from pytorch_lightning.core.lightning import LightningModule
 from pytorch_lightning.trainer.states import RunningStage
+from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
 from pytorch_lightning.utilities.warnings import WarningCache

 warning_cache = WarningCache()


-class _LightningModuleWrapperBase(torch.nn.Module):
+class _LightningModuleWrapperBase(DeviceDtypeModuleMixin, torch.nn.Module):

     def __init__(self, pl_module: LightningModule):
         """
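Adding DeviceDtypeModuleMixin to the wrapper's bases gives _LightningModuleWrapperBase the same device/dtype bookkeeping as the LightningModule it wraps, so code that asks the wrapper for its .device gets a meaningful answer. A minimal sketch of what such a mixin does (a simplified stand-in, not Lightning's actual implementation):

    import torch


    class DeviceDtypeModuleMixin(torch.nn.Module):
        """Simplified stand-in: remember where the module was last moved."""

        def __init__(self) -> None:
            super().__init__()
            self._device = torch.device("cpu")
            self._dtype = torch.get_default_dtype()

        @property
        def device(self) -> torch.device:
            return self._device

        @property
        def dtype(self) -> torch.dtype:
            return self._dtype

        def to(self, *args, **kwargs):
            module = super().to(*args, **kwargs)
            # Read the actual placement back from the first parameter, if any.
            for p in module.parameters():
                self._device, self._dtype = p.device, p.dtype
                break
            return module

With such a mixin ahead of torch.nn.Module in the MRO, a call like wrapper.to("cuda:0") updates wrapper.device as a side effect, which is the behavior the real mixin provides.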
pytorch_lightning/plugins/precision/native_amp.py (2 additions, 1 deletion)
@@ -91,4 +91,5 @@ def post_optimizer_step(self, optimizer: Optimizer, optimizer_idx: int) -> None:
     @contextmanager
     def train_step_context(self) -> Generator[autocast, None, None]:
         """Enable autocast context"""
-        yield torch.cuda.amp.autocast()
+        with torch.cuda.amp.autocast():
+            yield
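This one-line change is the heart of the PR. The old code yielded the torch.cuda.amp.autocast() object without ever entering it, so a caller writing `with self.train_step_context(): ...` ran the training step with autocast still disabled. Entering the context manager before yielding fixes that. A standalone sketch of the two behaviors (assumes a CUDA build; on CPU-only builds torch.cuda.amp.autocast may warn and disable itself):

    from contextlib import contextmanager

    import torch


    @contextmanager
    def broken_context():
        # Bug: constructs the autocast object but never enters it,
        # so autocast is NOT active inside the caller's `with` block.
        yield torch.cuda.amp.autocast()


    @contextmanager
    def fixed_context():
        # Fix: enter autocast first, then yield control to the caller,
        # so the caller's code runs with autocast enabled.
        with torch.cuda.amp.autocast():
            yield


    with broken_context():
        print(torch.is_autocast_enabled())  # False: the bug
    with fixed_context():
        print(torch.is_autocast_enabled())  # True: the fix

Strictly speaking, the return annotation could now read Generator[None, None, None], since the fixed generator yields nothing, but the diff leaves it unchanged.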
tests/models/test_amp.py (3 additions, 2 deletions)
@@ -63,7 +63,7 @@ def test_amp_single_gpu_ddp_spawn(tmpdir):
     model = BoringModel()
     # tutils.run_model_test(trainer_options, model)
     trainer.fit(model)
-
+    assert torch.is_autocast_enabled()
     assert trainer.state == TrainerState.FINISHED, f"Training failed with {trainer.state}"


@@ -103,7 +103,7 @@ def test_amp_multi_gpu_ddp_spawn(tmpdir):
     model = BoringModel()
     # tutils.run_model_test(trainer_options, model)
     trainer.fit(model)
-
+    assert torch.is_autocast_enabled()
     assert trainer.state == TrainerState.FINISHED, f"Training failed with {trainer.state}"


@@ -152,6 +152,7 @@ def test_amp_gpu_ddp_slurm_managed(tmpdir):
     assert trainer.training_type_plugin.cluster_environment.resolve_root_node_address('abc[23-24]') == 'abc23'
     generated = trainer.training_type_plugin.cluster_environment.resolve_root_node_address('abc[23-24, 45-40, 40]')
     assert generated == 'abc23'
+    assert torch.is_autocast_enabled()


 @pytest.mark.skipif(torch.cuda.is_available(), reason="test is restricted only on CPU")
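Note that torch.is_autocast_enabled() reports the calling thread's autocast state at the moment of the call, so the most direct place to assert it is inside the model while the training step is running. A hypothetical sketch of that style of check (the BoringModel import path and the subclass are assumptions, not part of this diff):

    import torch

    from tests.helpers.boring_model import BoringModel  # import path assumed


    class AMPAssertingModel(BoringModel):
        def training_step(self, batch, batch_idx):
            # Under native AMP (precision=16 on CUDA), the training step
            # should now run inside an active autocast region.
            assert torch.is_autocast_enabled()
            output = self(batch)
            assert output.dtype == torch.float16  # CUDA ops emit half precision
            return super().training_step(batch, batch_idx)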
tests/overrides/test_data_parallel.py (13 additions, 0 deletions)