diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9cf9b731c27fd..670392e60ab96 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Changed the order of `backward`, `step`, `zero_grad` to `zero_grad`, `backward`, `step` ([#6147](https://github.com/PyTorchLightning/pytorch-lightning/pull/6147))
 
+- Changed default for DeepSpeed CPU Offload to False, due to prohibitively slow speeds at smaller scale ([#6262](https://github.com/PyTorchLightning/pytorch-lightning/pull/6262))
+
+
 - Renamed `pytorch_lightning.callbacks.swa` to `pytorch_lightning.callbacks.stochastic_weight_avg` ([#6259](https://github.com/PyTorchLightning/pytorch-lightning/pull/6259))
 
diff --git a/pytorch_lightning/plugins/training_type/deepspeed.py b/pytorch_lightning/plugins/training_type/deepspeed.py
index 75e5bf74be643..3f9eccce7073c 100644
--- a/pytorch_lightning/plugins/training_type/deepspeed.py
+++ b/pytorch_lightning/plugins/training_type/deepspeed.py
@@ -66,7 +66,7 @@ def __init__(
         self,
         zero_optimization: bool = True,
         stage: int = 2,
-        cpu_offload: bool = True,
+        cpu_offload: bool = False,
         contiguous_gradients: bool = True,
         overlap_comm: bool = True,
         allgather_partitions: bool = True,
@@ -104,7 +104,7 @@ def __init__(
            stage: Different stages of the ZeRO Optimizer. 0 is disabled,
                1 is optimizer state partitioning, 2 is optimizer+gradient state partitioning (default: 2)
 
-            cpu_offload: Enable offloading optimizer memory and computation to CPU (default: True)
+            cpu_offload: Enable offloading optimizer memory and computation to CPU
 
            contiguous_gradients: Copies gradients to a continuous buffer as they are produced.
                Avoids memory fragmentation during backwards. Useful when training large models. (default: True)
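
Because the default flips to `False`, anyone relying on the old behaviour must now opt in to CPU offload explicitly. A minimal usage sketch, assuming the PyTorch Lightning 1.2-era `Trainer`/`plugins` API and a working DeepSpeed installation (the `precision` and `gpus` settings here are illustrative, not required by this change):

```python
from pytorch_lightning import Trainer
from pytorch_lightning.plugins import DeepSpeedPlugin

# cpu_offload now defaults to False; pass True explicitly to keep
# offloading optimizer memory and computation to the CPU. This trades
# step speed for GPU memory, which is mainly worthwhile at larger scale.
trainer = Trainer(
    gpus=1,
    precision=16,  # DeepSpeed is typically run with mixed precision
    plugins=DeepSpeedPlugin(
        stage=2,           # ZeRO stage 2: optimizer + gradient state partitioning
        cpu_offload=True,  # opt back in; no longer the default after this change
    ),
)
# trainer.fit(model) then trains with optimizer state offloaded to CPU.
```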