missing tests default_root_dir=tmpdir (#6314)
* default_root_dir=tmpdir

* miss
Borda committed Mar 4, 2021
1 parent 4f90455 commit b9cf122
Showing 9 changed files with 72 additions and 59 deletions.
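
The change is mechanical across all nine files: every Trainer constructed in a test gains default_root_dir=tmpdir, so checkpoints and logs land in pytest's per-test temporary directory instead of the current working directory. Below is a minimal sketch of the pattern, not a line from this diff; the test name, the fast_dev_run flag, and the BoringModel import path are illustrative assumptions.

# Illustrative sketch of the pattern this commit applies (assumed names, not from the diff).
# pytest injects the `tmpdir` fixture; passing it as `default_root_dir` keeps all
# Trainer artifacts (logs, checkpoints) inside the per-test temporary directory.
from pytorch_lightning import Trainer
from tests.helpers.boring_model import BoringModel  # assumed import path for the test helper


def test_uses_tmpdir(tmpdir):
    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir,  # write test artifacts to tmpdir, not the working directory
        fast_dev_run=True,
    )
    trainer.fit(model)
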
5 changes: 1 addition & 4 deletions tests/models/test_restore.py
@@ -167,10 +167,7 @@ def test_callbacks_state_resume_from_checkpoint(tmpdir):
def get_trainer_args():
checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor="val_loss", save_last=True)
trainer_args = dict(
default_root_dir=tmpdir, max_steps=1, logger=False, callbacks=[
checkpoint,
callback_capture,
]
default_root_dir=tmpdir, max_steps=1, logger=False, callbacks=[checkpoint, callback_capture]
)
assert checkpoint.best_model_path == ""
assert checkpoint.best_model_score is None
18 changes: 9 additions & 9 deletions tests/models/test_sync_batchnorm.py
@@ -105,8 +105,16 @@ def test_sync_batchnorm_ddp(tmpdir):
dm.setup(stage=None)

model = SyncBNModule(gpu_count=2, bn_targets=bn_outputs)
ddp = DDPSpawnPlugin(
parallel_devices=[torch.device("cuda", 0), torch.device("cuda", 1)],
num_nodes=1,
sync_batchnorm=True,
cluster_environment=TorchElasticEnvironment(),
find_unused_parameters=True
)

trainer = Trainer(
default_root_dir=tmpdir,
gpus=2,
num_nodes=1,
accelerator='ddp_spawn',
@@ -115,15 +123,7 @@ def test_sync_batchnorm_ddp(tmpdir):
sync_batchnorm=True,
num_sanity_val_steps=0,
replace_sampler_ddp=False,
plugins=[
DDPSpawnPlugin(
parallel_devices=[torch.device("cuda", 0), torch.device("cuda", 1)],
num_nodes=1,
sync_batchnorm=True,
cluster_environment=TorchElasticEnvironment(),
find_unused_parameters=True
)
]
plugins=[ddp]
)

trainer.fit(model, dm)
60 changes: 29 additions & 31 deletions tests/plugins/test_deepspeed_plugin.py
@@ -133,7 +133,11 @@ def test_deepspeed_precision_choice(amp_backend, tmpdir):
"""

trainer = Trainer(
fast_dev_run=True, default_root_dir=tmpdir, plugins='deepspeed', amp_backend=amp_backend, precision=16
fast_dev_run=True,
default_root_dir=tmpdir,
plugins='deepspeed',
amp_backend=amp_backend,
precision=16,
)

assert isinstance(trainer.accelerator.training_type_plugin, DeepSpeedPlugin)
@@ -178,13 +182,11 @@ def test_deepspeed_defaults(tmpdir):

@RunIf(deepspeed=True)
def test_invalid_deepspeed_defaults_no_precision(tmpdir):
"""
Test to ensure that using defaults, if precision is not set to 16, we throw an exception.
"""
"""Test to ensure that using defaults, if precision is not set to 16, we throw an exception."""
model = BoringModel()
trainer = Trainer(
fast_dev_run=True,
default_root_dir=tmpdir,
fast_dev_run=True,
plugins='deepspeed',
)
with pytest.raises(
@@ -195,27 +197,29 @@ def test_invalid_deepspeed_defaults_no_precision(tmpdir):

@RunIf(min_gpus=1, deepspeed=True)
def test_warn_deepspeed_override_backward(tmpdir):
"""
Test to ensure that if the backward hook in the LightningModule is overridden, we throw a warning.
"""
"""Test to ensure that if the backward hook in the LightningModule is overridden, we throw a warning."""

class TestModel(BoringModel):

def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None:
return loss.backward()

model = TestModel()
trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir, plugins=DeepSpeedPlugin(), gpus=1, precision=16)
trainer = Trainer(
fast_dev_run=True,
default_root_dir=tmpdir,
plugins=DeepSpeedPlugin(),
gpus=1,
precision=16,
)
with pytest.warns(UserWarning, match='Overridden backward hook in the LightningModule will be ignored'):
trainer.fit(model)


@RunIf(min_gpus=1, deepspeed=True)
def test_deepspeed_run_configure_optimizers(tmpdir):
"""
Test end to end that deepspeed works with defaults (without ZeRO as that requires compilation),
whilst using configure_optimizers for optimizers and schedulers.
"""
"""Test end to end that deepspeed works with defaults (without ZeRO as that requires compilation),
whilst using configure_optimizers for optimizers and schedulers."""

class TestModel(BoringModel):

@@ -234,7 +238,7 @@ def on_train_start(self) -> None:
default_root_dir=tmpdir,
gpus=1,
fast_dev_run=True,
precision=16
precision=16,
)

trainer.fit(model)
@@ -267,7 +271,7 @@ def on_train_start(self) -> None:
default_root_dir=tmpdir,
gpus=1,
fast_dev_run=True,
precision=16
precision=16,
)

trainer.fit(model)
@@ -278,9 +282,7 @@ def on_train_start(self) -> None:

@RunIf(min_gpus=1, deepspeed=True)
def test_deepspeed_custom_precision_params(tmpdir):
"""
Ensure if we modify the FP16 parameters via the DeepSpeedPlugin, the deepspeed config contains these changes.
"""
"""Ensure if we modify the FP16 parameters via the DeepSpeedPlugin, the deepspeed config contains these changes."""

class TestModel(BoringModel):

@@ -293,24 +295,15 @@ def on_train_start(self) -> None:
raise SystemExit()

model = TestModel()
trainer = Trainer(
plugins=[
DeepSpeedPlugin(
loss_scale=10, initial_scale_power=10, loss_scale_window=10, hysteresis=10, min_loss_scale=10
)
],
precision=16,
gpus=1
)
ds = DeepSpeedPlugin(loss_scale=10, initial_scale_power=10, loss_scale_window=10, hysteresis=10, min_loss_scale=10)
trainer = Trainer(default_root_dir=tmpdir, plugins=[ds], precision=16, gpus=1)
with pytest.raises(SystemExit):
trainer.fit(model)


@RunIf(min_gpus=1, deepspeed=True)
def test_deepspeed_assert_config_zero_offload_disabled(tmpdir, deepspeed_zero_config):
"""
Ensure if we use a config and turn off cpu_offload, that this is set to False within the config.
"""
"""Ensure if we use a config and turn off cpu_offload, that this is set to False within the config."""

deepspeed_zero_config['zero_optimization']['cpu_offload'] = False

@@ -321,7 +314,12 @@ def on_train_start(self) -> None:
raise SystemExit()

model = TestModel()
trainer = Trainer(plugins=[DeepSpeedPlugin(config=deepspeed_zero_config)], precision=16, gpus=1)
trainer = Trainer(
plugins=[DeepSpeedPlugin(config=deepspeed_zero_config)],
precision=16,
gpus=1,
default_root_dir=tmpdir,
)
with pytest.raises(SystemExit):
trainer.fit(model)

4 changes: 3 additions & 1 deletion tests/plugins/test_rpc_plugin.py
@@ -38,6 +38,7 @@ def on_fit_start(self, trainer, pl_module):

model = BoringModel()
trainer = Trainer(
default_root_dir=str(tmpdir),
fast_dev_run=True,
gpus=gpus,
num_processes=num_processes,
@@ -76,7 +77,8 @@ def test_rpc_function_calls_ddp(tmpdir):
max_epochs=max_epochs,
gpus=2,
distributed_backend='ddp',
plugins=[plugin]
plugins=[plugin],
default_root_dir=tmpdir,
)

trainer.fit(model)
@@ -75,7 +75,7 @@ def training_step_scalar_with_step_end(tmpdir):
model.training_step_end = model.training_step_end__scalar
model.val_dataloader = None

trainer = Trainer(fast_dev_run=True, weights_summary=None)
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, weights_summary=None)
trainer.fit(model)

# make sure correct steps were called
@@ -165,7 +165,11 @@ def test_train_step_epoch_end_scalar(tmpdir):
model.training_epoch_end = model.training_epoch_end__scalar
model.val_dataloader = None

trainer = Trainer(max_epochs=1, weights_summary=None)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)

# make sure correct steps were called
@@ -222,13 +226,13 @@ def test_dpp_reduce_mean_pbar(tmpdir):

trainer = Trainer(
max_epochs=1,
default_root_dir=os.getcwd(),
default_root_dir=tmpdir,
limit_train_batches=10,
limit_test_batches=2,
limit_val_batches=2,
accelerator=distributed_backend,
gpus=2,
precision=32
precision=32,
)

trainer.fit(model)
6 changes: 2 additions & 4 deletions tests/trainer/logging_/test_logger_connector.py
@@ -478,6 +478,7 @@ def test_auto_add_dataloader_idx(tmpdir, add_dataloader_idx):
""" test that auto_add_dataloader_idx argument works """

class TestModel(BoringModel):

def val_dataloader(self):
dl = super().val_dataloader()
return [dl, dl]
@@ -495,10 +496,7 @@ def validation_step(self, *args, **kwargs):
model = TestModel()
model.validation_epoch_end = None

trainer = Trainer(
default_root_dir=tmpdir,
max_steps=5
)
trainer = Trainer(default_root_dir=tmpdir, max_steps=5)
trainer.fit(model)
logged = trainer.logged_metrics

6 changes: 3 additions & 3 deletions tests/trainer/logging_/test_train_loop_logging_1_0.py
@@ -411,7 +411,7 @@ def val_dataloader(self):
assert generated == expected


def test_validation_step_with_string_data_logging():
def test_validation_step_with_string_data_logging(tmpdir):

class TestModel(BoringModel):

@@ -436,7 +436,7 @@ def validation_step(self, batch, batch_idx):
# model
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
@@ -491,7 +491,7 @@ def validation_step(self, batch, batch_idx):
# model
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
6 changes: 5 additions & 1 deletion tests/trainer/test_trainer.py
@@ -472,7 +472,11 @@ def on_load_checkpoint(self, _):
state = pl_load(ckpt)

# Resume training
new_trainer = Trainer(resume_from_checkpoint=ckpt, max_epochs=2)
new_trainer = Trainer(
default_root_dir=tmpdir,
resume_from_checkpoint=ckpt,
max_epochs=2,
)
new_trainer.fit(next_model)
assert state["global_step"] + next_model.num_batches_seen == trainer.num_training_batches * trainer.max_epochs
assert next_model.num_on_load_checkpoint_called == 1
14 changes: 12 additions & 2 deletions tests/trainer/test_trainer_tricks.py
@@ -34,7 +34,12 @@ def test_num_training_batches(tmpdir):
"""
# when we have fewer batches in the dataloader we should use those instead of the limit
model = EvalModelTemplate()
trainer = Trainer(limit_val_batches=100, limit_train_batches=100, max_epochs=1)
trainer = Trainer(
limit_val_batches=100,
limit_train_batches=100,
max_epochs=1,
default_root_dir=tmpdir,
)
trainer.fit(model)

assert len(model.train_dataloader()) == 10
@@ -45,7 +50,12 @@ def test_num_training_batches(tmpdir):

# when we have more batches in the dataloader we should limit them
model = EvalModelTemplate()
trainer = Trainer(limit_val_batches=7, limit_train_batches=7, max_epochs=1)
trainer = Trainer(
limit_val_batches=7,
limit_train_batches=7,
max_epochs=1,
default_root_dir=tmpdir,
)
trainer.fit(model)

assert len(model.train_dataloader()) == 10
