diff --git a/nemo/lightning/pytorch/callbacks/model_checkpoint.py b/nemo/lightning/pytorch/callbacks/model_checkpoint.py
index b11a7bf84f0f..7ebeed138d2c 100644
--- a/nemo/lightning/pytorch/callbacks/model_checkpoint.py
+++ b/nemo/lightning/pytorch/callbacks/model_checkpoint.py
@@ -430,13 +430,6 @@ def _save_checkpoint(self, trainer: 'pytorch_lightning.Trainer', filepath: str)
             not self.save_optim_on_train_end and trainer.global_step == trainer.max_steps
         )
 
-        ## PEFT training must have save_weights_only=False to use the on_save_checkpoint callback.
-        ## (See https://github.com/Lightning-AI/pytorch-lightning/blob/bc3c9c536dc88bfa9a46f63fbce22b382a86a9cb/src/lightning/pytorch/trainer/connectors/checkpoint_connector.py#L487-L492)
-        # breakpoint()
-        # from nemo.lightning.pytorch.callbacks import PEFT
-        # if any(isinstance(callback, PEFT) for callback in trainer.callbacks):
-        #     save_weights_only = False
-
         # Async save passes the finalization function to checkpoint_io,
         # sync save calls the finalization function immediately after save.
         finalize_fn = self._get_finalize_save_checkpoint_callback(trainer, filepath, trainer.global_step)
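
For context: the deleted lines were a commented-out workaround plus a stray `breakpoint()`, not live code. The constraint they documented comes from Lightning's checkpoint connector (see the linked source): when `save_weights_only=True`, the connector skips callback state, so a PEFT callback's `on_save_checkpoint` hook never runs. Below is a minimal sketch of what that override would look like if it were enabled; the helper name `_resolve_save_weights_only` is hypothetical and not part of this PR.

```python
# Hypothetical sketch (not in this PR): force a full checkpoint when a PEFT
# callback is attached, mirroring the commented-out code removed above.
import pytorch_lightning
from nemo.lightning.pytorch.callbacks import PEFT


def _resolve_save_weights_only(trainer: 'pytorch_lightning.Trainer', save_weights_only: bool) -> bool:
    # Lightning only calls callbacks' on_save_checkpoint when it dumps a full
    # checkpoint (weights_only=False), so PEFT must opt out of weights-only
    # saving for its adapter state to be written.
    if any(isinstance(callback, PEFT) for callback in trainer.callbacks):
        return False
    return save_weights_only
```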