diff --git a/.azure-pipelines/gpu-tests.yml b/.azure-pipelines/gpu-tests.yml
index 5333bfd867da07..23d8507bb13355 100644
--- a/.azure-pipelines/gpu-tests.yml
+++ b/.azure-pipelines/gpu-tests.yml
@@ -74,46 +74,46 @@ jobs:
     displayName: 'Get legacy checkpoints'
 
   - bash: |
-      python -m coverage run --source pytorch_lightning -m pytest pytorch_lightning tests -v --junitxml=$(Build.StagingDirectory)/test-results.xml --durations=50
+      python -m coverage run --source pytorch_lightning -m pytest tests/accelerators/test_ddp.py tests/models/test_hooks.py -v --junitxml=$(Build.StagingDirectory)/test-results.xml --durations=50
     displayName: 'Testing: standard'
 
-  - bash: |
-      bash tests/special_tests.sh
-    displayName: 'Testing: special'
-
-  - bash: |
-      python -m coverage report
-      python -m coverage xml
-      python -m coverage html
-      python -m codecov --token=$(CODECOV_TOKEN) --commit=$(Build.SourceVersion) --flags=gpu,pytest --name="GPU-coverage" --env=linux,azure
-      ls -l
-    displayName: 'Statistics'
-
-  - task: PublishTestResults@2
-    displayName: 'Publish test results'
-    inputs:
-      testResultsFiles: '$(Build.StagingDirectory)/test-results.xml'
-      testRunTitle: '$(Agent.OS) - $(Build.DefinitionName) - Python $(python.version)'
-    condition: succeededOrFailed()
-
-  - task: PublishCodeCoverageResults@1
-    displayName: 'Publish coverage report'
-    inputs:
-      codeCoverageTool: 'cobertura'
-      summaryFileLocation: 'coverage.xml'
-      reportDirectory: '$(Build.SourcesDirectory)/htmlcov'
-      testRunTitle: '$(Agent.OS) - $(Build.BuildNumber)[$(Agent.JobName)] - Python $(python.version)'
-    condition: succeededOrFailed()
-
-  - bash: |
-      python -m pytest benchmarks -v --maxfail=2 --durations=0
-    displayName: 'Testing: benchmarks'
-
-  - script: |
-      set -e
-      python -m pytest pl_examples -v --maxfail=2 --durations=0
-      bash pl_examples/run_examples-args.sh --trainer.gpus 1 --trainer.max_epochs 1 --data.batch_size 64 --trainer.limit_train_batches 5 --trainer.limit_val_batches 3
-      bash pl_examples/run_ddp-examples.sh --trainer.max_epochs 1 --data.batch_size 32 --trainer.limit_train_batches 2 --trainer.limit_val_batches 2
-    env:
-      PL_USE_MOCKED_MNIST: "1"
-    displayName: 'Examples'
+  #- bash: |
+  #    bash tests/special_tests.sh
+  #  displayName: 'Testing: special'
+#
+  #- bash: |
+  #    python -m coverage report
+  #    python -m coverage xml
+  #    python -m coverage html
+  #    python -m codecov --token=$(CODECOV_TOKEN) --commit=$(Build.SourceVersion) --flags=gpu,pytest --name="GPU-coverage" --env=linux,azure
+  #    ls -l
+  #  displayName: 'Statistics'
+#
+  #- task: PublishTestResults@2
+  #  displayName: 'Publish test results'
+  #  inputs:
+  #    testResultsFiles: '$(Build.StagingDirectory)/test-results.xml'
+  #    testRunTitle: '$(Agent.OS) - $(Build.DefinitionName) - Python $(python.version)'
+  #  condition: succeededOrFailed()
+#
+  #- task: PublishCodeCoverageResults@1
+  #  displayName: 'Publish coverage report'
+  #  inputs:
+  #    codeCoverageTool: 'cobertura'
+  #    summaryFileLocation: 'coverage.xml'
+  #    reportDirectory: '$(Build.SourcesDirectory)/htmlcov'
+  #    testRunTitle: '$(Agent.OS) - $(Build.BuildNumber)[$(Agent.JobName)] - Python $(python.version)'
+  #  condition: succeededOrFailed()
+#
+  #- bash: |
+  #    python -m pytest benchmarks -v --maxfail=2 --durations=0
+  #  displayName: 'Testing: benchmarks'
+#
+  #- script: |
+  #    set -e
+  #    python -m pytest pl_examples -v --maxfail=2 --durations=0
+  #    bash pl_examples/run_examples-args.sh --trainer.gpus 1 --trainer.max_epochs 1 --data.batch_size 64 --trainer.limit_train_batches 5 --trainer.limit_val_batches 3
+  #    bash pl_examples/run_ddp-examples.sh --trainer.max_epochs 1 --data.batch_size 32 --trainer.limit_train_batches 2 --trainer.limit_val_batches 2
+  #  env:
+  #    PL_USE_MOCKED_MNIST: "1"
+  #  displayName: 'Examples'
diff --git a/tests/models/test_hooks.py b/tests/models/test_hooks.py
index 9e557abeb879e5..08a6dd40f49aed 100644
--- a/tests/models/test_hooks.py
+++ b/tests/models/test_hooks.py
@@ -378,6 +378,28 @@ def _predict_batch(trainer, model, batches):
     return out
 
 
+@RunIf(deepspeed=True, min_gpus=1)
+def test_ci_bug(tmpdir):
+    called = []
+    model = HookedModel(called)
+    callback = HookedCallback(called)
+    train_batches = 2
+    val_batches = 2
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        max_epochs=1,
+        limit_train_batches=train_batches,
+        limit_val_batches=val_batches,
+        progress_bar_refresh_rate=0,
+        weights_summary=None,
+        callbacks=[callback],
+        gpus=1,
+        precision=16,
+        plugins='deepspeed',
+    )
+    trainer.fit(model)
+
+
 @pytest.mark.parametrize(
     'kwargs',
     [
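
For context: a minimal standalone sketch of the configuration the new test_ci_bug test exercises (DeepSpeed plugin, fp16, single GPU). This is not part of the diff; ToyModel and RandomDataset are hypothetical stand-ins for the repo's HookedModel and HookedCallback helpers, and the snippet assumes the 1.3-era Trainer arguments used in the diff itself (gpus=1, precision=16, plugins='deepspeed') with deepspeed installed.

    import torch
    from torch.utils.data import DataLoader, Dataset

    import pytorch_lightning as pl


    class RandomDataset(Dataset):
        """Hypothetical stand-in dataset: 8 random feature vectors."""

        def __len__(self):
            return 8

        def __getitem__(self, idx):
            return torch.randn(32)


    class ToyModel(pl.LightningModule):
        """Hypothetical stand-in for HookedModel: a single linear layer."""

        def __init__(self):
            super().__init__()
            self.layer = torch.nn.Linear(32, 2)

        def training_step(self, batch, batch_idx):
            # Return a scalar loss so fit() has something to backprop through.
            return self.layer(batch).sum()

        def configure_optimizers(self):
            return torch.optim.SGD(self.parameters(), lr=0.1)


    if __name__ == "__main__":
        # Same Trainer flags as the new test: DeepSpeed + fp16 on one GPU.
        trainer = pl.Trainer(
            max_epochs=1,
            limit_train_batches=2,
            gpus=1,
            precision=16,
            plugins="deepspeed",
        )
        trainer.fit(ToyModel(), DataLoader(RandomDataset(), batch_size=4))

The test itself can also be run in isolation with `python -m pytest tests/models/test_hooks.py::test_ci_bug -v` on a machine that satisfies the RunIf(deepspeed=True, min_gpus=1) guard.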