Add branch condition for calling move to device in prefetch (FSDP 3/n) #6342
Changes from 4 commits
@@ -12,12 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
from unittest.mock import patch

import pytest
import torch

from pytorch_lightning import Trainer
from pytorch_lightning.plugins import DDPPlugin, DDPSpawnPlugin
from tests.accelerators import ddp_model, DDPLauncher
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf
@@ -91,7 +93,6 @@ def test_torch_distributed_backend_env_variables(tmpdir):
    _environ = {"PL_TORCH_DISTRIBUTED_BACKEND": "undefined", "CUDA_VISIBLE_DEVICES": "0,1", "WORLD_SIZE": "2"}
    with patch.dict(os.environ, _environ), \
            patch('torch.cuda.device_count', return_value=2):
        with pytest.raises(ValueError, match="Invalid backend: 'undefined'"):
            model = BoringModel()
            trainer = Trainer(
@@ -102,3 +103,28 @@ def test_torch_distributed_backend_env_variables(tmpdir):
                logger=False,
            )
            trainer.fit(model)


@pytest.mark.parametrize('move_to_device_pre_dispatch_enabled', [True, False])
@mock.patch('pytorch_lightning.plugins.DDPPlugin.model_to_device')
def test_move_to_device_in_pre_dispatch(mock_model_to_device, tmpdir, move_to_device_pre_dispatch_enabled):
Review comment: Two things:

Review comment: You might be able to do so with a callback, applying the patch context manager.

Review comment: You can make patch conditional on parametrize, with the return object. (A sketch of this appears after the test below.)
    """
    Test that when ``call_move_to_device_hook_in_pre_dispatch`` is disabled, we do not move the model
    to the device until later in training.
    """
    with mock.patch(
        'pytorch_lightning.plugins.DDPPlugin.call_move_to_device_hook_in_pre_dispatch',
        move_to_device_pre_dispatch_enabled
    ):
        model = BoringModel()
        trainer = Trainer(
            default_root_dir=tmpdir, fast_dev_run=True, accelerator='ddp', plugins=DDPPlugin(), num_processes=1
Review comment: Why do you need to pass …
        )
        trainer.fit(model)

        # Check whether the mocked model_to_device was called. Since we're on CPU,
        # model_to_device does nothing anyway.
        if move_to_device_pre_dispatch_enabled:
            mock_model_to_device.assert_called()
        else:
            mock_model_to_device.assert_not_called()
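
A minimal sketch of the reviewer's parametrize suggestion, assuming only stock pytest and unittest.mock APIs: parametrize over an optional patcher object and enter it as a context manager only when one is given. The test name and the nullcontext fallback are illustrative, not part of the PR.

from contextlib import nullcontext
from unittest import mock

import pytest

from pytorch_lightning import Trainer
from tests.helpers.boring_model import BoringModel


@pytest.mark.parametrize(
    'maybe_patch',
    [
        mock.patch('pytorch_lightning.plugins.DDPPlugin.call_move_to_device_hook_in_pre_dispatch', False),
        None,
    ],
)
def test_pre_dispatch_conditional_patch(maybe_patch, tmpdir):
    # Enter the patch only for the parametrized case; otherwise run unpatched.
    with maybe_patch if maybe_patch is not None else nullcontext():
        model = BoringModel()
        trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
        trainer.fit(model)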
Review comment: Should this be in ParallelPlugin?
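
For context, a minimal sketch of where the hook could live if hoisted into ParallelPlugin, per the comment above. The property default and the pre_dispatch wiring are assumptions for illustration, not the merged implementation.

class ParallelPlugin:

    @property
    def call_move_to_device_hook_in_pre_dispatch(self) -> bool:
        # Assumed default: move the model to the device before dispatch.
        # Plugins that wrap or shard the model later (e.g. FSDP) would
        # override this to return False and defer the move.
        return True


class DDPPlugin(ParallelPlugin):

    def model_to_device(self) -> None:
        ...  # move the wrapped module to the plugin's root device

    def pre_dispatch(self) -> None:
        # The branch condition this PR adds: only move the model to the
        # device during pre-dispatch when the hook is enabled.
        if self.call_move_to_device_hook_in_pre_dispatch:
            self.model_to_device()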