From 7c945cfe8ecb38e41ee57c8884d582e0b453bc8a Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Sat, 18 Mar 2023 10:55:24 -0300
Subject: [PATCH] Don't include PeftModel every time

---
 modules/LoRA.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/LoRA.py b/modules/LoRA.py
index b568e57b2e..f29523d2c3 100644
--- a/modules/LoRA.py
+++ b/modules/LoRA.py
@@ -1,13 +1,13 @@
 from pathlib import Path
 
-from peft import PeftModel
-
 import modules.shared as shared
 from modules.models import load_model
 
 
 def add_lora_to_model(lora_name):
 
+    from peft import PeftModel
+
     # Is there a more efficient way of returning to the base model?
     if lora_name == "None":
         print("Reloading the model to remove the LoRA...")
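
The patch replaces a module-level import with a function-local one, so peft is only loaded when add_lora_to_model is actually called. A minimal, self-contained sketch of the same deferred-import pattern; the function and flag names below are illustrative placeholders, not part of the repository:

    def maybe_use_peft(use_lora: bool):
        """Deferred import: the heavy dependency is only loaded on demand."""
        if not use_lora:
            # Callers that never touch LoRA never pay the import cost of peft,
            # and don't even need it installed.
            return None
        # Imported here instead of at module top level, mirroring the patch.
        from peft import PeftModel
        return PeftModel

The trade-off is a small repeated lookup cost on each call (Python caches imports in sys.modules, so only the first call is expensive) in exchange for faster module import and a softer dependency on peft.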