Support offload/reload of optimizer states for custom devices #9467

Merged
merged 1 commit into from Dec 2, 2024

20 changes: 13 additions & 7 deletions paddlenlp/trainer/trainer.py
@@ -110,6 +110,7 @@
 )
 from ..utils.import_utils import is_datasets_available, is_paddle_cuda_available
 from ..utils.log import logger
+from ..utils.tools import get_env_device
 from .argparser import strtobool
 from .integrations import get_reporting_integration_callbacks
 from .plugins.timer import RuntimeTimer, get_timers, set_timers
@@ -1768,10 +1769,6 @@
         return self.optimizer

     def _apply_to_optimizer(self, action):
-        if "gpu" not in paddle.device.get_device():
-            logger.warning("offload/reload optimizer's states is only supported on GPU devices.")
-            return
-
         attributes = [
             ("_accumulators", "_moment1_acc_str"),
             ("_accumulators", "_moment2_acc_str"),
@@ -1786,13 +1783,22 @@
                     target_attr = target_attr[getattr(self.optimizer, attr[1])]

                 for key, value in target_attr.items():
-                    target_attr[key] = getattr(value, action)()
+                    if get_env_device() == "gpu":
+                        target_attr[key] = getattr(value, action)()
+                    else:
+                        target_attr[key] = getattr(value, "to")(action)

     def _offload_optimizer(self):
-        self._apply_to_optimizer("pin_memory")
+        if get_env_device() == "gpu":
+            self._apply_to_optimizer("pin_memory")
+        else:
+            self._apply_to_optimizer("cpu")

     def _reload_optimizer(self):
-        self._apply_to_optimizer("cuda")
+        if get_env_device() == "gpu":
+            self._apply_to_optimizer("cuda")
+        else:
+            self._apply_to_optimizer(get_env_device())

     def _load_rng_state(self, checkpoint):
         # Load RNG states from `checkpoint`
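
For readers less familiar with Paddle's tensor-placement API, the change inside _apply_to_optimizer reduces to the per-tensor dispatch below. This is a minimal sketch, not PaddleNLP code: the helper name move_state_tensor is made up for illustration, and it assumes paddle.Tensor exposes pin_memory(), cuda(), and to() as used in the diff.

import paddle


def move_state_tensor(value, action, device="gpu"):
    # Mirrors the dispatch added in _apply_to_optimizer:
    # on GPU, `action` is a Tensor method name ("pin_memory" or "cuda"),
    # so the move is value.pin_memory() / value.cuda();
    # on a custom device, `action` is a place string ("cpu" or the device name),
    # so the move is value.to(action).
    if device == "gpu":
        return getattr(value, action)()
    return value.to(action)


# Hypothetical usage on a custom device such as an NPU (requires a matching Paddle build):
state = paddle.zeros([8, 8])
offloaded = move_state_tensor(state, "cpu", device="npu")     # move to host memory
reloaded = move_state_tensor(offloaded, "npu", device="npu")  # move back onto the device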
Loading
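
Putting the two entry points together, the offload/reload round trip over a dict of optimizer-state tensors looks roughly like the sketch below. It illustrates the technique rather than reproducing the trainer's code: pinned host memory is used on GPU so the later host-to-device copy stays fast, while custom devices fall back to plain CPU tensors and Tensor.to(); the device string and the state dict are assumptions standing in for get_env_device() and the optimizer's accumulators.

import paddle

# Stand-in for the optimizer state walked by _apply_to_optimizer
# (the real code iterates the optimizer's accumulators, e.g. _moment1_acc_str / _moment2_acc_str).
state = {
    "moment1": paddle.zeros([1024, 1024]),
    "moment2": paddle.zeros([1024, 1024]),
}
device = "gpu"  # stand-in for get_env_device(); e.g. "npu" on a custom-device build

# Offload: release device memory while the states are idle.
for key, value in state.items():
    state[key] = value.pin_memory() if device == "gpu" else value.to("cpu")

# ... run work that needs the freed device memory ...

# Reload: move the states back before the next optimizer step.
for key, value in state.items():
    state[key] = value.cuda() if device == "gpu" else value.to(device)

On GPU the original pin_memory() / cuda() pair is kept unchanged; the PR only adds the Tensor.to() path so the same offload/reload hooks work when get_env_device() reports a non-GPU device.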