Skip to content

Commit

Permalink
fix: make paddle.nn.quant and qlora imports optional; rename merge_model_path arg to merge_lora_model_path
Browse files Browse the repository at this point in the history
  • Loading branch information
lugimzzz committed Jan 2, 2024
1 parent dc72cb6 commit 32cb533
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 4 deletions.
13 changes: 11 additions & 2 deletions llm/merge_lora_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,19 @@
import os

import paddle
from paddle.nn.quant import weight_dequantize, weight_quantize

from paddlenlp.peft import LoRAConfig, LoRAModel
from paddlenlp.quantization.qlora import qlora_weight_quantize_dequantize

# Optional dependencies: the weight (de)quantization helpers only exist in
# newer Paddle / PaddleNLP builds. Fall back to None so downstream code can
# detect their absence at call time instead of crashing at import time.
try:
    from paddle.nn.quant import weight_dequantize, weight_quantize
except ImportError:
    # Catch only missing-module errors; a bare `except:` would also mask
    # SystemExit/KeyboardInterrupt and real bugs raised during import.
    weight_dequantize = None
    weight_quantize = None
try:
    from paddlenlp.quantization.qlora import qlora_weight_quantize_dequantize
except ImportError:
    qlora_weight_quantize_dequantize = None

from paddlenlp.quantization.quantization_config import QuantizationConfig
from paddlenlp.transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from paddlenlp.transformers.utils import device_guard
Expand Down
3 changes: 1 addition & 2 deletions tests/llm/test_lora.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,9 +65,8 @@ def test_lora(self):

# merge weights
merge_lora_weights_config = {
"model_name_or_path": lora_config["model_name_or_path"],
"lora_path": lora_config["output_dir"],
"merge_model_path": lora_config["output_dir"],
"merge_lora_model_path": lora_config["output_dir"],
}
with argv_context_guard(merge_lora_weights_config):
from merge_lora_params import merge
Expand Down

0 comments on commit 32cb533

Please sign in to comment.