diff --git a/examples/pissa_finetuning/README.md b/examples/pissa_finetuning/README.md
index b8d3be689b..bd4e7440da 100644
--- a/examples/pissa_finetuning/README.md
+++ b/examples/pissa_finetuning/README.md
@@ -71,7 +71,7 @@ The main advantage of PiSSA is concentrated during the training phase. For a tra
 peft_model.save_pretrained(output_dir)
 # Given the matrices $A_0$ and $B_0$, initialized by PiSSA and untrained, and the trained matrices $A$ and $B$,
 # we can convert these to LoRA by setting $\Delta W = A \times B - A_0 \times B_0 = [A \mid A_0] \times [B \mid -B_0]^T = A'B'$.
-peft_model.save_pretrained(output_dir, convert_pissa_to_lora="pissa_init")
+peft_model.save_pretrained(output_dir, path_initial_model_for_weight_conversion="pissa_init")
 ```

 This conversion enables the loading of LoRA on top of a standard base model:
diff --git a/examples/pissa_finetuning/pissa_finetuning.py b/examples/pissa_finetuning/pissa_finetuning.py
index e125cacec2..3146601bf9 100644
--- a/examples/pissa_finetuning/pissa_finetuning.py
+++ b/examples/pissa_finetuning/pissa_finetuning.py
@@ -136,7 +136,7 @@ class ScriptArguments(SFTConfig):
 if script_args.convert_pissa_to_lora:
     peft_model.save_pretrained(
         os.path.join(script_args.output_dir, "pissa_lora"),
-        convert_pissa_to_lora=os.path.join(script_args.residual_model_name_or_path, "pissa_init"),
+        path_initial_model_for_weight_conversion=os.path.join(script_args.residual_model_name_or_path, "pissa_init"),
     )
 else:
     peft_model.save_pretrained(
diff --git a/setup.py b/setup.py
index 1659facd0e..e232118956 100644
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@

 from setuptools import find_packages, setup

-VERSION = "0.13.3.dev0"
+VERSION = "0.14.0"

 extras = {}
 extras["quality"] = [
diff --git a/src/peft/__init__.py b/src/peft/__init__.py
index dda32d64e8..cc16af8ffb 100644
--- a/src/peft/__init__.py
+++ b/src/peft/__init__.py
@@ -17,7 +17,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-__version__ = "0.13.3.dev0"
+__version__ = "0.14.0"

 from .auto import (
     AutoPeftModel,
diff --git a/src/peft/peft_model.py b/src/peft/peft_model.py
index 867f2bee62..5aea83e44a 100644
--- a/src/peft/peft_model.py
+++ b/src/peft/peft_model.py
@@ -229,7 +229,6 @@ def save_pretrained(
         selected_adapters: Optional[list[str]] = None,
         save_embedding_layers: Union[str, bool] = "auto",
         is_main_process: bool = True,
-        convert_pissa_to_lora: Optional[str] = None,
         path_initial_model_for_weight_conversion: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
@@ -253,8 +252,6 @@ def save_pretrained(
             is_main_process (`bool`, *optional*):
                 Whether the process calling this is the main process or not. Will default to `True`. Will not save
                 the checkpoint if not on the main process, which is important for multi device setups (e.g. DDP).
-            convert_pissa_to_lora (`str, *optional*`):
-                Deprecated. Use `path_initial_model_for_weight_conversion` instead.
             path_initial_model_for_weight_conversion (`str, *optional*`):
                 The path to the initialized adapter, which is obtained after initializing the model with PiSSA or
                 OLoRA and before performing any training. When `path_initial_model_for_weight_conversion` is not None, the
@@ -281,13 +278,6 @@
                     f"You passed an invalid `selected_adapters` arguments, current supported adapter names are"
                     f" {list(self.peft_config.keys())} - got {selected_adapters}."
                 )
-        # TODO: remove deprecated parameter in PEFT v0.14.0
-        if convert_pissa_to_lora is not None:
-            warnings.warn(
-                "`convert_pissa_to_lora` is deprecated and will be removed in a future version. "
-                "Use `path_initial_model_for_weight_conversion` instead."
-            )
-            path_initial_model_for_weight_conversion = convert_pissa_to_lora

         def save_mutated_as_lora(peft_config, path_initial_model_for_weight_conversion, output_state_dict, kwargs):
             if peft_config.use_rslora and (peft_config.rank_pattern or peft_config.alpha_pattern):
diff --git a/tests/test_custom_models.py b/tests/test_custom_models.py
index b62bf58b2a..761c2d5a66 100644
--- a/tests/test_custom_models.py
+++ b/tests/test_custom_models.py
@@ -912,7 +912,13 @@ def from_pretrained(cls, model_id, torch_dtype=None):


 class PeftCustomModelTester(unittest.TestCase, PeftCommonTester):
-    """TODO"""
+    """
+    Implements the tests for custom models.
+
+    Most tests should just call the parent class, e.g. test_save_pretrained calls self._test_save_pretrained. Override
+    this if custom models don't work with the parent test method.
+
+    """

     transformers_class = MockTransformerWrapper

diff --git a/tests/test_initialization.py b/tests/test_initialization.py
index 9ac7ffe080..4fc96d0bb4 100644
--- a/tests/test_initialization.py
+++ b/tests/test_initialization.py
@@ -583,44 +583,6 @@ def test_pissa_alpha_pattern_and_rslora_raises(self, tmp_path):
                 tmp_path / "pissa-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
             )

-    # TODO: remove test for deprecated arg in PEFT v0.14.0
-    def test_lora_pissa_conversion_same_output_after_loading_with_deprecated_arg(self, data, tmp_path):
-        model = self.get_model()
-        config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8)
-        peft_model = get_peft_model(deepcopy(model), config)
-        peft_model.peft_config["default"].init_lora_weights = True
-        peft_model.save_pretrained(tmp_path / "init-model")
-        peft_model.peft_config["default"].init_lora_weights = "pissa"
-
-        tol = 1e-06
-        peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
-        output_pissa = peft_model(data)[0]
-
-        peft_model.save_pretrained(tmp_path / "pissa-model-converted", convert_pissa_to_lora=tmp_path / "init-model")
-        model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
-        output_converted = model_converted(data)[0]
-
-        assert torch.allclose(output_pissa, output_converted, atol=tol, rtol=tol)
-        assert model_converted.peft_config["default"].r == 16
-        assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
-        assert torch.allclose(
-            model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
-        )
-
-    # TODO: remove test for deprecated warning in PEFT v0.14.0
-    def test_lora_pissa_conversion_deprecated_warning(self, data, tmp_path):
-        model = self.get_model()
-        config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8)
-        peft_model = get_peft_model(deepcopy(model), config)
-        peft_model.peft_config["default"].init_lora_weights = True
-        peft_model.save_pretrained(tmp_path / "init-model")
-        warning_message = "`convert_pissa_to_lora` is deprecated and will be removed in a future version. Use `path_initial_model_for_weight_conversion` instead."
-        # Test the warning
-        with pytest.warns(UserWarning, match=warning_message):
-            peft_model.save_pretrained(
-                tmp_path / "pissa-model-converted", convert_pissa_to_lora=tmp_path / "init-model"
-            )
-
     def test_olora_conversion_same_output_after_loading(self, data, tmp_path):
         model = self.get_model()
         output_base = model(data)[0]
diff --git a/tests/test_xlora.py b/tests/test_xlora.py
index e150116b06..65d966b053 100644
--- a/tests/test_xlora.py
+++ b/tests/test_xlora.py
@@ -15,10 +15,8 @@
 import os

 import huggingface_hub
-import packaging
 import pytest
 import torch
-import transformers
 from safetensors.torch import load_file
 from transformers import AutoModelForCausalLM, AutoTokenizer

@@ -27,9 +25,6 @@
 from peft.utils import infer_device


-uses_transformers_4_45 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.45.0")
-
-
 class TestXlora:
     torch_device = infer_device()

@@ -133,8 +128,7 @@ def test_functional(self, tokenizer, model):
         )
         assert torch.isfinite(outputs[: inputs.shape[1] :]).all()

-    # TODO: remove the skip when 4.45 is released!
-    @pytest.mark.skipif(not uses_transformers_4_45, reason="Requires transformers >= 4.45")
+    # TODO: fix the xfailing test
     @pytest.mark.xfail
     def test_scalings_logging_methods(self, tokenizer, model):
         model.enable_scalings_logging()
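A minimal sketch of the conversion flow with the renamed argument, loosely mirroring the deprecation test removed above; the MLP class and the "init-model" / "pissa-lora" directory names are illustrative placeholders, not values from the patch:

# Sketch only: toy model and directory names are assumptions, not from the diff.
from copy import deepcopy

import torch
from torch import nn

from peft import LoraConfig, PeftModel, get_peft_model


class MLP(nn.Module):
    # toy model with a single linear layer targeted by the adapter
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(10, 10)

    def forward(self, x):
        return self.linear(x)


base_model = MLP()
config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8)
peft_model = get_peft_model(deepcopy(base_model), config)

# Save the untrained PiSSA-initialized adapter first; it is needed later for the conversion.
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained("init-model")
peft_model.peft_config["default"].init_lora_weights = "pissa"

# ... training would normally happen here ...

# Previously: peft_model.save_pretrained("pissa-lora", convert_pissa_to_lora="init-model")
peft_model.save_pretrained("pissa-lora", path_initial_model_for_weight_conversion="init-model")

# The converted adapter behaves like a regular LoRA and loads on top of the unmodified base model.
restored = PeftModel.from_pretrained(deepcopy(base_model), "pissa-lora")
print(restored(torch.randn(2, 10)).shape)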