Skip to content
This repository has been archived by the owner on Oct 25, 2024. It is now read-only.

Update requirements for Chinese model NormalizedConfig #1323

Merged
merged 6 commits into from
Mar 13, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ sentencepiece != 0.1.92
torch==2.2.0+cpu
transformers
intel_extension_for_pytorch==2.2.0
git+https://github.com/huggingface/optimum.git@e38d40ad220a180213f99b1d93d0407a826c326d
optimum-intel
bitsandbytes #baichuan
transformers_stream_generator
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -127,25 +127,18 @@ function run_benchmark {
elif [ "${topology}" = "baichuan_7b" ]; then
model_name_or_path="baichuan-inc/Baichuan-7B"
extra_cmd=$extra_cmd" --trust_remote_code True"
pip install transformers==4.33
elif [ "${topology}" = "baichuan_13b" ]; then
model_name_or_path="baichuan-inc/Baichuan-13B-Base"
extra_cmd=$extra_cmd" --trust_remote_code True"
extra_cmd=$extra_cmd" --_commit_hash 14d5b0e204542744900f6fb52422c6d633bdcb00"
pip install transformers==4.33
elif [ "${topology}" = "baichuan2_7b" ]; then
model_name_or_path="baichuan-inc/Baichuan2-7B-Base"
extra_cmd=$extra_cmd" --trust_remote_code True"
pip install transformers==4.33
elif [ "${topology}" = "baichuan2_13b" ]; then
model_name_or_path="baichuan-inc/Baichuan2-13B-Base"
extra_cmd=$extra_cmd" --trust_remote_code True"
pip install transformers==4.33
elif [ "${topology}" = "qwen_7b" ]; then
model_name_or_path="Qwen/Qwen-7B"
extra_cmd=$extra_cmd" --trust_remote_code True"
extra_cmd=$extra_cmd" --_commit_hash f7bc352f27bb1c02ee371a4576942a7d96c8bb97"
pip install transformers==4.35.2
elif [ "${topology}" = "mistral_7b" ]; then
model_name_or_path="Intel/neural-chat-7b-v3"
elif [ "${topology}" = "phi_1b" ]; then
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -158,37 +158,30 @@ function run_tuning {
extra_cmd=$extra_cmd" --sq --alpha ${alpha}"
extra_cmd=$extra_cmd" --output_dir ${tuned_checkpoint}"
extra_cmd=$extra_cmd" --trust_remote_code True"
pip install transformers==4.33
elif [ "${topology}" = "baichuan_13b" ]; then
alpha=0.85
model_name_or_path="baichuan-inc/Baichuan-13B-Base"
extra_cmd=$extra_cmd" --sq --alpha ${alpha}"
extra_cmd=$extra_cmd" --output_dir ${tuned_checkpoint}"
extra_cmd=$extra_cmd" --trust_remote_code True"
extra_cmd=$extra_cmd" --_commit_hash 14d5b0e204542744900f6fb52422c6d633bdcb00"
pip install transformers==4.33
elif [ "${topology}" = "baichuan2_7b" ]; then
alpha=0.85
model_name_or_path="baichuan-inc/Baichuan2-7B-Base"
extra_cmd=$extra_cmd" --sq --alpha ${alpha}"
extra_cmd=$extra_cmd" --output_dir ${tuned_checkpoint}"
extra_cmd=$extra_cmd" --trust_remote_code True"
pip install transformers==4.33
elif [ "${topology}" = "baichuan2_13b" ]; then
alpha=0.55
model_name_or_path="baichuan-inc/Baichuan2-13B-Base"
extra_cmd=$extra_cmd" --sq --alpha ${alpha}"
extra_cmd=$extra_cmd" --output_dir ${tuned_checkpoint}"
extra_cmd=$extra_cmd" --trust_remote_code True"
pip install transformers==4.33
elif [ "${topology}" = "qwen_7b" ]; then
alpha=0.9
model_name_or_path="Qwen/Qwen-7B"
extra_cmd=$extra_cmd" --sq --alpha ${alpha}"
extra_cmd=$extra_cmd" --output_dir ${tuned_checkpoint}"
extra_cmd=$extra_cmd" --trust_remote_code True"
extra_cmd=$extra_cmd" --_commit_hash f7bc352f27bb1c02ee371a4576942a7d96c8bb97"
pip install transformers==4.35.2
elif [ "${topology}" = "mistral_7b" ]; then
alpha=0.8
model_name_or_path="Intel/neural-chat-7b-v3"
Expand Down Expand Up @@ -216,8 +209,6 @@ function run_tuning {
extra_cmd=$extra_cmd" --output_dir ${tuned_checkpoint}"
extra_cmd=$extra_cmd" --trust_remote_code True"
pip install transformers==4.35.2
pip install torch==2.1.0+cpu torchvision==0.16.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
pip install intel-extension-for-pytorch==2.1.0
fi

if [ ${script} = "run_generation.py" ];then
Expand Down
Loading