Commit: Test CI error

carmocca committed Jun 22, 2021
1 parent 72d5ee3 commit 9c33aac
Showing 2 changed files with 63 additions and 41 deletions.
82 changes: 41 additions & 41 deletions .azure-pipelines/gpu-tests.yml
@@ -74,46 +74,46 @@ jobs:
     displayName: 'Get legacy checkpoints'
   - bash: |
-      python -m coverage run --source pytorch_lightning -m pytest pytorch_lightning tests -v --junitxml=$(Build.StagingDirectory)/test-results.xml --durations=50
+      python -m coverage run --source pytorch_lightning -m pytest pytorch_lightning tests/models/test_hooks.py -v --junitxml=$(Build.StagingDirectory)/test-results.xml --durations=50
     displayName: 'Testing: standard'
-  - bash: |
-      bash tests/special_tests.sh
-    displayName: 'Testing: special'
-
-  - bash: |
-      python -m coverage report
-      python -m coverage xml
-      python -m coverage html
-      python -m codecov --token=$(CODECOV_TOKEN) --commit=$(Build.SourceVersion) --flags=gpu,pytest --name="GPU-coverage" --env=linux,azure
-      ls -l
-    displayName: 'Statistics'
-
-  - task: PublishTestResults@2
-    displayName: 'Publish test results'
-    inputs:
-      testResultsFiles: '$(Build.StagingDirectory)/test-results.xml'
-      testRunTitle: '$(Agent.OS) - $(Build.DefinitionName) - Python $(python.version)'
-    condition: succeededOrFailed()
-
-  - task: PublishCodeCoverageResults@1
-    displayName: 'Publish coverage report'
-    inputs:
-      codeCoverageTool: 'cobertura'
-      summaryFileLocation: 'coverage.xml'
-      reportDirectory: '$(Build.SourcesDirectory)/htmlcov'
-      testRunTitle: '$(Agent.OS) - $(Build.BuildNumber)[$(Agent.JobName)] - Python $(python.version)'
-    condition: succeededOrFailed()
-
-  - bash: |
-      python -m pytest benchmarks -v --maxfail=2 --durations=0
-    displayName: 'Testing: benchmarks'
-
-  - script: |
-      set -e
-      python -m pytest pl_examples -v --maxfail=2 --durations=0
-      bash pl_examples/run_examples-args.sh --trainer.gpus 1 --trainer.max_epochs 1 --data.batch_size 64 --trainer.limit_train_batches 5 --trainer.limit_val_batches 3
-      bash pl_examples/run_ddp-examples.sh --trainer.max_epochs 1 --data.batch_size 32 --trainer.limit_train_batches 2 --trainer.limit_val_batches 2
-    env:
-      PL_USE_MOCKED_MNIST: "1"
-    displayName: 'Examples'
+  #- bash: |
+  #  bash tests/special_tests.sh
+  #  displayName: 'Testing: special'
+  #
+  #- bash: |
+  #  python -m coverage report
+  #  python -m coverage xml
+  #  python -m coverage html
+  #  python -m codecov --token=$(CODECOV_TOKEN) --commit=$(Build.SourceVersion) --flags=gpu,pytest --name="GPU-coverage" --env=linux,azure
+  #  ls -l
+  #  displayName: 'Statistics'
+  #
+  #- task: PublishTestResults@2
+  #  displayName: 'Publish test results'
+  #  inputs:
+  #    testResultsFiles: '$(Build.StagingDirectory)/test-results.xml'
+  #    testRunTitle: '$(Agent.OS) - $(Build.DefinitionName) - Python $(python.version)'
+  #  condition: succeededOrFailed()
+  #
+  #- task: PublishCodeCoverageResults@1
+  #  displayName: 'Publish coverage report'
+  #  inputs:
+  #    codeCoverageTool: 'cobertura'
+  #    summaryFileLocation: 'coverage.xml'
+  #    reportDirectory: '$(Build.SourcesDirectory)/htmlcov'
+  #    testRunTitle: '$(Agent.OS) - $(Build.BuildNumber)[$(Agent.JobName)] - Python $(python.version)'
+  #  condition: succeededOrFailed()
+  #
+  #- bash: |
+  #  python -m pytest benchmarks -v --maxfail=2 --durations=0
+  #  displayName: 'Testing: benchmarks'
+  #
+  #- script: |
+  #  set -e
+  #  python -m pytest pl_examples -v --maxfail=2 --durations=0
+  #  bash pl_examples/run_examples-args.sh --trainer.gpus 1 --trainer.max_epochs 1 --data.batch_size 64 --trainer.limit_train_batches 5 --trainer.limit_val_batches 3
+  #  bash pl_examples/run_ddp-examples.sh --trainer.max_epochs 1 --data.batch_size 32 --trainer.limit_train_batches 2 --trainer.limit_val_batches 2
+  #  env:
+  #    PL_USE_MOCKED_MNIST: "1"
+  #  displayName: 'Examples'
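
This change narrows the standard GPU test step to a single file and comments out the remaining steps, a common way to bisect a flaky CI failure. For local debugging, the narrowed selection can be invoked programmatically. A minimal sketch, assuming a development checkout of pytorch-lightning with pytest installed (paths mirror the CI command above; this snippet is not part of the commit):

# Hypothetical local reproduction of the narrowed CI step; run from the
# repository root of a pytorch-lightning checkout.
import sys

import pytest

exit_code = pytest.main([
    "tests/models/test_hooks.py",  # the single file the CI step now targets
    "-v",                          # verbose output, as in the CI command
    "--durations=50",              # report the 50 slowest tests, as in CI
])
sys.exit(exit_code)
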
22 changes: 22 additions & 0 deletions tests/models/test_hooks.py
@@ -378,6 +378,28 @@ def _predict_batch(trainer, model, batches):
     return out
 
 
+@RunIf(deepspeed=True, min_gpus=1)
+def test_ci_bug(tmpdir):
+    called = []
+    model = HookedModel(called)
+    callback = HookedCallback(called)
+    train_batches = 2
+    val_batches = 2
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        max_epochs=1,
+        limit_train_batches=train_batches,
+        limit_val_batches=val_batches,
+        progress_bar_refresh_rate=0,
+        weights_summary=None,
+        callbacks=[callback],
+        gpus=1,
+        precision=16,
+        plugins='deepspeed',
+    )
+    trainer.fit(model)
+
+
 @pytest.mark.parametrize(
     'kwargs',
     [

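The new test is gated by the RunIf marker, so it is collected only where DeepSpeed is installed and at least one GPU is visible. As a rough illustration of the gating idea, here is a minimal sketch built on pytest.mark.skipif; it assumes pytest and torch are importable and is not pytorch-lightning's actual RunIf implementation:

# Hypothetical sketch of a conditional-skip marker in the spirit of RunIf;
# not the project's real decorator.
import importlib.util

import pytest
import torch


def run_if(min_gpus: int = 0, deepspeed: bool = False):
    """Return a skipif marker that gates a test on hardware and packages."""
    skip = False
    reasons = []
    if torch.cuda.device_count() < min_gpus:
        skip = True
        reasons.append(f"requires {min_gpus} GPU(s)")
    if deepspeed and importlib.util.find_spec("deepspeed") is None:
        skip = True
        reasons.append("requires deepspeed")
    return pytest.mark.skipif(skip, reason=", ".join(reasons))


# Usage mirrors the decorator in the diff above:
@run_if(deepspeed=True, min_gpus=1)
def test_needs_deepspeed_and_gpu():
    assert torch.cuda.is_available()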