From dda2ccacb59577df345b7bd557c93d98b4b3784a Mon Sep 17 00:00:00 2001
From: changwangss
Date: Wed, 18 Oct 2023 14:08:47 -0700
Subject: [PATCH 1/5] reduce the evaluation time

Signed-off-by: changwangss
---
 tests/test_evaluation.py | 78 ++++++++++++++--------------------------
 1 file changed, 26 insertions(+), 52 deletions(-)

diff --git a/tests/test_evaluation.py b/tests/test_evaluation.py
index 8a3fd557275..97c74864748 100644
--- a/tests/test_evaluation.py
+++ b/tests/test_evaluation.py
@@ -10,10 +10,9 @@ class TestLmEvaluationHarness(unittest.TestCase):
     @classmethod
     def setUpClass(self):
         self.clm_model = AutoModelForCausalLM.from_pretrained(
-            "facebook/opt-125m",
+            "hf-internal-testing/tiny-random-gptj",
             torchscript=True
         )
-        self.seq2seq_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
         tmp_model = torch.jit.trace(
             self.clm_model, self.clm_model.dummy_inputs["input_ids"]
         )
@@ -33,22 +32,21 @@ def tearDownClass(self):
         shutil.rmtree("./gptj", ignore_errors=True)
         shutil.rmtree("./gptj-past", ignore_errors=True)
         shutil.rmtree("./evaluation_results.json", ignore_errors=True)
-        shutil.rmtree("./llama", ignore_errors=True)
         cmd = 'pip uninstall lm_eval -y'
         p = subprocess.Popen(cmd, preexec_fn=os.setsid, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, shell=True) # nosec
         p.communicate()
 
-    def test_evaluate_for_casualLM(self):
+    def test_evaluate_for_CausalLM(self):
         from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate
         results = evaluate(
             model="hf-causal",
             model_args='pretrained="hf-internal-testing/tiny-random-gptj",tokenizer="hf-internal-testing/tiny-random-gptj",dtype=float32',
             tasks=["piqa"],
-            limit=20,
+            limit=5,
         )
-        self.assertEqual(results["results"]["piqa"]["acc"], 0.45)
+        self.assertEqual(results["results"]["piqa"]["acc"], 0.6)
 
     def test_evaluate_for_Seq2SeqLM(self):
         from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate
         results = evaluate(
@@ -56,9 +54,9 @@
             model="hf-seq2seq",
             model_args='pretrained="hf-internal-testing/tiny-random-t5",tokenizer="hf-internal-testing/tiny-random-t5",dtype=float32',
             tasks=["piqa"],
-            limit=20,
+            limit=5,
         )
-        self.assertEqual(results["results"]["piqa"]["acc"], 0.60)
+        self.assertEqual(results["results"]["piqa"]["acc"], 1.0)
 
     def test_evaluate_for_JitModel(self):
         from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate
@@ -67,34 +65,26 @@
             model_args='pretrained="hf-internal-testing/tiny-random-gptj",tokenizer="hf-internal-testing/tiny-random-gptj",dtype=float32',
             user_model=self.jit_model,
             tasks=["piqa"],
-            limit=20,
+            limit=5,
         )
-        self.assertEqual(results["results"]["piqa"]["acc"], 0.65)
+        self.assertEqual(results["results"]["piqa"]["acc"], 0.6)
 
-    def test_lambada_for_llama(self):
-        from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate
-        results = evaluate(
-            model="hf-causal",
-            model_args='pretrained="decapoda-research/llama-7b-hf",tokenizer="decapoda-research/llama-7b-hf",dtype=float32',
-            tasks=["lambada_openai", "lambada_standard"],
-            limit=20,
-        )
-        self.assertEqual(results["results"]["lambada_standard"]["acc"], 0.75)
-        self.assertEqual(results["results"]["lambada_openai"]["acc"], 0.70)
-
     def test_cnn_daily(self):
         from intel_extension_for_transformers.llm.evaluation.hf_eval import summarization_evaluate
+        model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
         results = summarization_evaluate(
-            model=self.clm_model,
+            model=model,
             tokenizer_name="facebook/opt-125m",
             batch_size=1,
             limit=5,
         )
self.assertEqual(results["rouge2"], 18.0431) + model = AutoModelForSeq2SeqLM.from_pretrained("t5-small") results = summarization_evaluate( - model=self.seq2seq_model, tokenizer_name="t5-small", batch_size=1, limit=5 + model=model, tokenizer_name="t5-small", batch_size=1, limit=5 ) - self.assertEqual(results["rouge2"], 9.6795) + self.assertEqual(results["rouge2"], 9.5858) def test_evaluate_for_ort_Seq2SeqLM(self): from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate @@ -108,10 +98,10 @@ def test_evaluate_for_ort_Seq2SeqLM(self): model="hf-seq2seq", model_args='pretrained="./t5-past",tokenizer="./t5-past",dtype=float32', tasks=["piqa"], - limit=20, + limit=5, model_format="onnx" ) - self.assertEqual(results["results"]["piqa"]["acc"], 0.60) + self.assertEqual(results["results"]["piqa"]["acc"], 1.0) # test evaluate encoder_model + decoder_model + decoder_with_past_model merged_model_path = "./t5-past/decoder_model_merged.onnx" @@ -121,10 +111,10 @@ def test_evaluate_for_ort_Seq2SeqLM(self): model="hf-seq2seq", model_args='pretrained="./t5-past",tokenizer="./t5-past",dtype=float32', tasks=["piqa"], - limit=20, + limit=5, model_format="onnx" ) - self.assertEqual(results["results"]["piqa"]["acc"], 0.60) + self.assertEqual(results["results"]["piqa"]["acc"], 1.0) # test evaluate encoder_model + decoder_model cmd = 'optimum-cli export onnx --model hf-internal-testing/tiny-random-t5 --task text2text-generation t5/' @@ -135,12 +125,12 @@ def test_evaluate_for_ort_Seq2SeqLM(self): model="hf-seq2seq", model_args='pretrained="./t5",tokenizer="./t5",dtype=float32', tasks=["piqa"], - limit=20, + limit=5, model_format="onnx" ) - self.assertEqual(results["results"]["piqa"]["acc"], 0.60) + self.assertEqual(results["results"]["piqa"]["acc"], 1.0) - def test_evaluate_for_ort_casualLM(self): + def test_evaluate_for_ort_CasualLM(self): from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate cmd = 'optimum-cli export onnx --model hf-internal-testing/tiny-random-gptj --task text-generation-with-past gptj-past/' p = subprocess.Popen(cmd, preexec_fn=os.setsid, stdout=subprocess.PIPE, @@ -152,10 +142,10 @@ def test_evaluate_for_ort_casualLM(self): model="hf-causal", model_args='pretrained="./gptj-past",tokenizer="./gptj-past",dtype=float32', tasks=["piqa"], - limit=20, + limit=5, model_format="onnx" ) - self.assertEqual(results["results"]["piqa"]["acc"], 0.45) + self.assertEqual(results["results"]["piqa"]["acc"], 0.6) # test evaluate decoder_model + decoder_with_past_model merged_model_path = "./gptj-past/decoder_model_merged.onnx" @@ -165,10 +155,10 @@ def test_evaluate_for_ort_casualLM(self): model="hf-causal", model_args='pretrained="./gptj-past",tokenizer="./gptj-past",dtype=float32', tasks=["piqa"], - limit=20, + limit=5, model_format="onnx" ) - self.assertEqual(results["results"]["piqa"]["acc"], 0.45) + self.assertEqual(results["results"]["piqa"]["acc"], 0.6) # test evaluate decoder_model cmd = 'optimum-cli export onnx --model hf-internal-testing/tiny-random-gptj --task text-generation gptj/' @@ -179,27 +169,11 @@ def test_evaluate_for_ort_casualLM(self): model="hf-causal", model_args='pretrained="./gptj",tokenizer="./gptj",dtype=float32', tasks=["piqa"], - limit=20, + limit=5, model_format="onnx" ) - self.assertEqual(results["results"]["piqa"]["acc"], 0.45) + self.assertEqual(results["results"]["piqa"]["acc"], 0.6) - def test_tokenizer_for_llama(self): - from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate - cmd = 'optimum-cli export onnx 
-        p = subprocess.Popen(cmd, preexec_fn=os.setsid, stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE, shell=True) # nosec
-        p.communicate()
-
-        results = evaluate(
-            model="hf-causal",
-            model_args='pretrained="./llama",tokenizer="decapoda-research/llama-7b-hf"',
-            tasks=["lambada_openai"],
-            limit=20,
-            model_format="onnx"
-        )
-        self.assertEqual(results["results"]["lambada_openai"]["acc"], 0.70)
-
 if __name__ == "__main__":
     unittest.main()

From 4adacf1112d9fd88a83c5cc7bcf7f3734e48ffec Mon Sep 17 00:00:00 2001
From: Wenxin Zhang
Date: Thu, 19 Oct 2023 13:55:29 +0800
Subject: [PATCH 2/5] use INC 2.3.1

Signed-off-by: Wenxin Zhang
---
 .github/workflows/script/unitTest/env_setup.sh | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/script/unitTest/env_setup.sh b/.github/workflows/script/unitTest/env_setup.sh
index 47ca0650e3d..969c1c75e0c 100644
--- a/.github/workflows/script/unitTest/env_setup.sh
+++ b/.github/workflows/script/unitTest/env_setup.sh
@@ -7,15 +7,16 @@ if [ ${inc} != 0 ]; then
 fi
 
 echo "Install neural_compressor binary..."
-n=0
-until [ "$n" -ge 5 ]; do
-    git clone https://github.com/intel/neural-compressor.git /neural-compressor
-    cd /neural-compressor
-    pip install -r requirements.txt
-    python setup.py install && break
-    n=$((n + 1))
-    sleep 5
-done
+pip install neural-compressor
+#n=0
+#until [ "$n" -ge 5 ]; do
+#    git clone https://github.com/intel/neural-compressor.git /neural-compressor
+#    cd /neural-compressor
+#    pip install -r requirements.txt
+#    python setup.py install && break
+#    n=$((n + 1))
+#    sleep 5
+#done
 
 # Install test requirements
 cd /intel-extension-for-transformers/tests

From 0487248d32468c76f46f768ce6e3984b1865c108 Mon Sep 17 00:00:00 2001
From: "Wang, Chang"
Date: Thu, 19 Oct 2023 17:41:10 +0800
Subject: [PATCH 3/5] Update test_evaluation.py

Signed-off-by: Wang, Chang
---
 tests/test_evaluation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_evaluation.py b/tests/test_evaluation.py
index 97c74864748..e42e5d96aad 100644
--- a/tests/test_evaluation.py
+++ b/tests/test_evaluation.py
@@ -84,7 +84,7 @@ def test_cnn_daily(self):
         results = summarization_evaluate(
             model=model, tokenizer_name="t5-small", batch_size=1, limit=5
         )
-        self.assertEqual(results["rouge2"], 9.5858)
+        self.assertEqual(results["rouge2"], 9.6795)
 
     def test_evaluate_for_ort_Seq2SeqLM(self):
         from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate

From e83812feb7ffe0cce86cf6acb61356b179650240 Mon Sep 17 00:00:00 2001
From: Wenxin Zhang
Date: Thu, 19 Oct 2023 17:52:56 +0800
Subject: [PATCH 4/5] update torch/ipex version

Signed-off-by: Wenxin Zhang
---
 .../deployment/imagenet/vit/requirements.txt | 2 +-
 .../image-classification/quantization/requirements.txt | 2 +-
 .../fill-mask/electra_base_chinese/requirements.txt | 2 +-
 .../deployment/squad/bert_large/requirements.txt | 2 +-
 .../squad/length_adaptive_transformer/requirements.txt | 2 +-
 .../pytorch/question-answering/dynamic/requirements.txt | 2 +-
 .../pruning/basic_magnitude/requirements.txt | 2 +-
 .../pruning/longformer_triviaqa/requirements.txt | 2 +-
 .../pytorch/summarization/quantization/requirements.txt | 2 +-
 .../text-classification/cascade-models/requirements.txt | 2 +-
 .../emotion/distilbert_base_uncased/requirements.txt | 2 +-
 .../deployment/mrpc/bert_base/requirements.txt | 2 +-
 .../deployment/mrpc/bert_base_cased/requirements.txt | 2 +-
 .../deployment/mrpc/bert_mini/requirements.txt | 2 +-
 .../mrpc/distilbert_base_uncased/requirements.txt | 2 +-
 .../deployment/mrpc/roberta_base/requirements.txt | 2 +-
 .../deployment/sparse/bert_mini/requirements.txt | 2 +-
 .../sparse/distilbert_base_uncased/requirements.txt | 2 +-
 .../deployment/sst2/bert_mini/requirements.txt | 2 +-
 .../sst2/distilbert_base_uncased/requirements.txt | 2 +-
 .../sst2/minilm_l6_h384_uncased/requirements.txt | 2 +-
 .../text-classification/early-exit/requirements.txt | 2 +-
 .../text-classification/new_pruning/requirements.txt | 2 +-
 .../orchestrate_optimizations/requirements.txt | 2 +-
 .../pytorch/text-classification/pruning/requirements.txt | 2 +-
 .../text-classification/quantization/ptq/requirements.txt | 2 +-
 .../text-classification/quantization/qat/requirements.txt | 2 +-
 .../text-classification/quantization/requirements.txt | 2 +-
 .../deployment/stable_diffusion/requirements.txt | 2 +-
 .../textual-inversion/quantization/requirements.txt | 2 +-
 .../token-classification/quantization/requirements.txt | 2 +-
 .../pytorch/translation/quantization/requirements.txt | 2 +-
 .../neural_chat/docker/Dockerfile | 8 ++++----
 .../neural_chat/docker/finetuning/Dockerfile | 2 +-
 .../docs/notebooks/multi_node_finetuning_on_spr.ipynb | 2 +-
 .../neural_chat/examples/instruction_tuning/README.md | 2 +-
 .../examples/instruction_tuning/requirements.txt | 2 +-
 .../neural_chat/requirements.txt | 4 ++--
 .../neural_chat/requirements_cpu.txt | 6 +++---
 .../neural_chat/requirements_pc.txt | 4 ++--
 .../neural_chat/tests/requirements.txt | 4 ++--
 .../neural_chat/ui/textbot/requirements.txt | 2 +-
 requirements.txt | 2 +-
 tests/requirements.txt | 2 +-
 workflows/chatbot/fine_tuning/README.md | 2 +-
 .../hf_finetuning_and_inference_nlp/requirements.txt | 2 +-
 46 files changed, 54 insertions(+), 54 deletions(-)

diff --git a/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/requirements.txt b/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/requirements.txt
index 0efa4714cac..76366c5efbc 100644
--- a/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/requirements.txt
+++ b/examples/huggingface/pytorch/image-classification/deployment/imagenet/vit/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 torchvision
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/image-classification/quantization/requirements.txt b/examples/huggingface/pytorch/image-classification/quantization/requirements.txt
index 517029af1af..d59aa71865a 100644
--- a/examples/huggingface/pytorch/image-classification/quantization/requirements.txt
+++ b/examples/huggingface/pytorch/image-classification/quantization/requirements.txt
@@ -3,7 +3,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 torchvision
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/language-modeling/deployment/fill-mask/electra_base_chinese/requirements.txt b/examples/huggingface/pytorch/language-modeling/deployment/fill-mask/electra_base_chinese/requirements.txt
index 75453d454e3..1a7cebc506e 100644
--- a/examples/huggingface/pytorch/language-modeling/deployment/fill-mask/electra_base_chinese/requirements.txt
+++ b/examples/huggingface/pytorch/language-modeling/deployment/fill-mask/electra_base_chinese/requirements.txt
@@ -1,2 +1,2 @@
 transformers
-torch==2.0.1
\ No newline at end of file
+torch==2.1.0
\ No newline at end of file
diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/bert_large/requirements.txt b/examples/huggingface/pytorch/question-answering/deployment/squad/bert_large/requirements.txt
index 273d45fd971..7661f639bc8 100644
--- a/examples/huggingface/pytorch/question-answering/deployment/squad/bert_large/requirements.txt
+++ b/examples/huggingface/pytorch/question-answering/deployment/squad/bert_large/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/requirements.txt b/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/requirements.txt
index d78b3f5e411..e5d78e27646 100644
--- a/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/requirements.txt
+++ b/examples/huggingface/pytorch/question-answering/deployment/squad/length_adaptive_transformer/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.12.1
diff --git a/examples/huggingface/pytorch/question-answering/dynamic/requirements.txt b/examples/huggingface/pytorch/question-answering/dynamic/requirements.txt
index 8f7d7b7763a..b07abba0c1c 100644
--- a/examples/huggingface/pytorch/question-answering/dynamic/requirements.txt
+++ b/examples/huggingface/pytorch/question-answering/dynamic/requirements.txt
@@ -1,6 +1,6 @@
 transformers
 datasets
 torchprofile
-torch==2.0.1
+torch==2.1.0
 intel_extension_for_pytorch
 accelerate
diff --git a/examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/requirements.txt b/examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/requirements.txt
index adf0950d7e8..356d1691b68 100644
--- a/examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/requirements.txt
+++ b/examples/huggingface/pytorch/question-answering/pruning/basic_magnitude/requirements.txt
@@ -1,5 +1,5 @@
 datasets >= 1.8.0
-torch==2.0.1
+torch==2.1.0
 transformers
 wandb
 accelerate
diff --git a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/requirements.txt b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/requirements.txt
index 6303484dd95..5a6251b32c9 100644
--- a/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/requirements.txt
+++ b/examples/huggingface/pytorch/question-answering/pruning/longformer_triviaqa/requirements.txt
@@ -1,5 +1,5 @@
 accelerate
 datasets
 transformers
-torch==2.0.1
+torch==2.1.0
 neural-compressor==2.0
diff --git a/examples/huggingface/pytorch/summarization/quantization/requirements.txt b/examples/huggingface/pytorch/summarization/quantization/requirements.txt
index 3196cace86f..cac876a61ee 100644
--- a/examples/huggingface/pytorch/summarization/quantization/requirements.txt
+++ b/examples/huggingface/pytorch/summarization/quantization/requirements.txt
@@ -4,6 +4,6 @@ sentencepiece != 0.1.92
 rouge-score
 nltk
 py7zr
-torch==2.0.1
+torch==2.1.0
 transformers
 protobuf
diff --git a/examples/huggingface/pytorch/text-classification/cascade-models/requirements.txt b/examples/huggingface/pytorch/text-classification/cascade-models/requirements.txt
index 97c77505a07..04e5d00da95 100644
--- a/examples/huggingface/pytorch/text-classification/cascade-models/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/cascade-models/requirements.txt
@@ -1,4 +1,4 @@
-torch==2.0.1
+torch==2.1.0
 numpy
 transformers
 datasets
diff --git a/examples/huggingface/pytorch/text-classification/deployment/emotion/distilbert_base_uncased/requirements.txt b/examples/huggingface/pytorch/text-classification/deployment/emotion/distilbert_base_uncased/requirements.txt
index 273d45fd971..7661f639bc8 100644
--- a/examples/huggingface/pytorch/text-classification/deployment/emotion/distilbert_base_uncased/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/deployment/emotion/distilbert_base_uncased/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base/requirements.txt b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base/requirements.txt
index 273d45fd971..7661f639bc8 100644
--- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base_cased/requirements.txt b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base_cased/requirements.txt
index 273d45fd971..7661f639bc8 100644
--- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base_cased/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_base_cased/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_mini/requirements.txt b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_mini/requirements.txt
index 273d45fd971..7661f639bc8 100644
--- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_mini/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/bert_mini/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/distilbert_base_uncased/requirements.txt b/examples/huggingface/pytorch/text-classification/deployment/mrpc/distilbert_base_uncased/requirements.txt
index 273d45fd971..7661f639bc8 100644
--- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/distilbert_base_uncased/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/distilbert_base_uncased/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/text-classification/deployment/mrpc/roberta_base/requirements.txt b/examples/huggingface/pytorch/text-classification/deployment/mrpc/roberta_base/requirements.txt
index 273d45fd971..7661f639bc8 100644
--- a/examples/huggingface/pytorch/text-classification/deployment/mrpc/roberta_base/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/deployment/mrpc/roberta_base/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/text-classification/deployment/sparse/bert_mini/requirements.txt b/examples/huggingface/pytorch/text-classification/deployment/sparse/bert_mini/requirements.txt
index 273d45fd971..7661f639bc8 100644
--- a/examples/huggingface/pytorch/text-classification/deployment/sparse/bert_mini/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/deployment/sparse/bert_mini/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/text-classification/deployment/sparse/distilbert_base_uncased/requirements.txt b/examples/huggingface/pytorch/text-classification/deployment/sparse/distilbert_base_uncased/requirements.txt
index 273d45fd971..7661f639bc8 100644
--- a/examples/huggingface/pytorch/text-classification/deployment/sparse/distilbert_base_uncased/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/deployment/sparse/distilbert_base_uncased/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/text-classification/deployment/sst2/bert_mini/requirements.txt b/examples/huggingface/pytorch/text-classification/deployment/sst2/bert_mini/requirements.txt
index 273d45fd971..7661f639bc8 100644
--- a/examples/huggingface/pytorch/text-classification/deployment/sst2/bert_mini/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/deployment/sst2/bert_mini/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/text-classification/deployment/sst2/distilbert_base_uncased/requirements.txt b/examples/huggingface/pytorch/text-classification/deployment/sst2/distilbert_base_uncased/requirements.txt
index 273d45fd971..7661f639bc8 100644
--- a/examples/huggingface/pytorch/text-classification/deployment/sst2/distilbert_base_uncased/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/deployment/sst2/distilbert_base_uncased/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/text-classification/deployment/sst2/minilm_l6_h384_uncased/requirements.txt b/examples/huggingface/pytorch/text-classification/deployment/sst2/minilm_l6_h384_uncased/requirements.txt
index 273d45fd971..7661f639bc8 100644
--- a/examples/huggingface/pytorch/text-classification/deployment/sst2/minilm_l6_h384_uncased/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/deployment/sst2/minilm_l6_h384_uncased/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
diff --git a/examples/huggingface/pytorch/text-classification/early-exit/requirements.txt b/examples/huggingface/pytorch/text-classification/early-exit/requirements.txt
index 22c8a2d2cdd..dbc84748da8 100644
--- a/examples/huggingface/pytorch/text-classification/early-exit/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/early-exit/requirements.txt
@@ -1,4 +1,4 @@
-torch==2.0.1
+torch==2.1.0
 transformers
 datasets
 allennlp
diff --git a/examples/huggingface/pytorch/text-classification/new_pruning/requirements.txt b/examples/huggingface/pytorch/text-classification/new_pruning/requirements.txt
index d1684b2889f..57368f3c958 100644
--- a/examples/huggingface/pytorch/text-classification/new_pruning/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/new_pruning/requirements.txt
@@ -6,5 +6,5 @@ sentencepiece
 scipy
 scikit-learn
 protobuf
-torch==2.0.1
+torch==2.1.0
 evaluate
\ No newline at end of file
diff --git a/examples/huggingface/pytorch/text-classification/orchestrate_optimizations/requirements.txt b/examples/huggingface/pytorch/text-classification/orchestrate_optimizations/requirements.txt
index b1b8aebabf1..60f12cca57c 100644
--- a/examples/huggingface/pytorch/text-classification/orchestrate_optimizations/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/orchestrate_optimizations/requirements.txt
@@ -1,5 +1,5 @@
 accelerate
-torch==2.0.1
+torch==2.1.0
 datasets >= 1.1.3
 sentencepiece != 0.1.92
 transformers
diff --git a/examples/huggingface/pytorch/text-classification/pruning/requirements.txt b/examples/huggingface/pytorch/text-classification/pruning/requirements.txt
index 6540152daf9..1456f81b2f3 100644
--- a/examples/huggingface/pytorch/text-classification/pruning/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/pruning/requirements.txt
@@ -2,6 +2,6 @@ accelerate
 datasets >= 1.1.3
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 transformers
 wandb
diff --git a/examples/huggingface/pytorch/text-classification/quantization/ptq/requirements.txt b/examples/huggingface/pytorch/text-classification/quantization/ptq/requirements.txt
index 6540152daf9..1456f81b2f3 100644
--- a/examples/huggingface/pytorch/text-classification/quantization/ptq/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/quantization/ptq/requirements.txt
@@ -2,6 +2,6 @@ accelerate
 datasets >= 1.1.3
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 transformers
 wandb
diff --git a/examples/huggingface/pytorch/text-classification/quantization/qat/requirements.txt b/examples/huggingface/pytorch/text-classification/quantization/qat/requirements.txt
index 6540152daf9..1456f81b2f3 100644
--- a/examples/huggingface/pytorch/text-classification/quantization/qat/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/quantization/qat/requirements.txt
@@ -2,6 +2,6 @@ accelerate
 datasets >= 1.1.3
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 transformers
 wandb
diff --git a/examples/huggingface/pytorch/text-classification/quantization/requirements.txt b/examples/huggingface/pytorch/text-classification/quantization/requirements.txt
index 6540152daf9..1456f81b2f3 100644
--- a/examples/huggingface/pytorch/text-classification/quantization/requirements.txt
+++ b/examples/huggingface/pytorch/text-classification/quantization/requirements.txt
@@ -2,6 +2,6 @@ accelerate
 datasets >= 1.1.3
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 transformers
 wandb
diff --git a/examples/huggingface/pytorch/text-to-image/deployment/stable_diffusion/requirements.txt b/examples/huggingface/pytorch/text-to-image/deployment/stable_diffusion/requirements.txt
index 52f0759c2ab..c74efb403bf 100644
--- a/examples/huggingface/pytorch/text-to-image/deployment/stable_diffusion/requirements.txt
+++ b/examples/huggingface/pytorch/text-to-image/deployment/stable_diffusion/requirements.txt
@@ -4,7 +4,7 @@ accelerate
 datasets >= 1.8.0
 sentencepiece != 0.1.92
 protobuf
-torch==2.0.1
+torch==2.1.0
 onnx>=1.12
 onnxruntime==1.13.1
 diffusers==0.12.1
diff --git a/examples/huggingface/pytorch/textual-inversion/quantization/requirements.txt b/examples/huggingface/pytorch/textual-inversion/quantization/requirements.txt
index 5b738372901..67a89afb643 100644
--- a/examples/huggingface/pytorch/textual-inversion/quantization/requirements.txt
+++ b/examples/huggingface/pytorch/textual-inversion/quantization/requirements.txt
@@ -1,6 +1,6 @@
 diffusers==0.4.1
 accelerate
-torch==2.0.1
+torch==2.1.0
 torchvision
 transformers
 ftfy
diff --git a/examples/huggingface/pytorch/token-classification/quantization/requirements.txt b/examples/huggingface/pytorch/token-classification/quantization/requirements.txt
index 0403c13ac3b..50591581b25 100644
--- a/examples/huggingface/pytorch/token-classification/quantization/requirements.txt
+++ b/examples/huggingface/pytorch/token-classification/quantization/requirements.txt
@@ -1,6 +1,6 @@
 accelerate
 seqeval
 datasets >= 1.1.3
-torch==2.0.1
+torch==2.1.0
 transformers
 wandb
diff --git a/examples/huggingface/pytorch/translation/quantization/requirements.txt b/examples/huggingface/pytorch/translation/quantization/requirements.txt
index 069db64bd79..436b3bcc059 100644
--- a/examples/huggingface/pytorch/translation/quantization/requirements.txt
+++ b/examples/huggingface/pytorch/translation/quantization/requirements.txt
@@ -4,5 +4,5 @@ sentencepiece != 0.1.92
 protobuf
 sacrebleu >= 1.4.12
 py7zr
-torch==2.0.1
+torch==2.1.0
 transformers
diff --git a/intel_extension_for_transformers/neural_chat/docker/Dockerfile b/intel_extension_for_transformers/neural_chat/docker/Dockerfile
index 09ab00f65dc..93ce0fe9b52 100644
--- a/intel_extension_for_transformers/neural_chat/docker/Dockerfile
+++ b/intel_extension_for_transformers/neural_chat/docker/Dockerfile
@@ -66,11 +66,11 @@ RUN conda init bash && \
 ## If local torch whl are feasible, use localfile to install, avoiding donwloading time
 COPY ./torch*.whl /
-RUN if [ -f /torch-2.0.1+cpu-cp3${PYTHON_VERSION##*.}-cp3${PYTHON_VERSION##*.}-linux_x86_64.whl ]; then source activate && conda activate neuralchat && pip install /torch-2.0.1+cpu-cp3${PYTHON_VERSION##*.}-cp3${PYTHON_VERSION##*.}-linux_x86_64.whl; fi
-RUN if [ -f /torchaudio-2.0.2+cpu-cp3${PYTHON_VERSION##*.}-cp3${PYTHON_VERSION##*.}-linux_x86_64.whl ]; then source activate && conda activate neuralchat && pip install /torchaudio-2.0.2+cpu-cp3${PYTHON_VERSION##*.}-cp3${PYTHON_VERSION##*.}-linux_x86_64.whl; fi
-RUN if [ -f /torchvision-0.15.2+cpu-cp3${PYTHON_VERSION##*.}-cp3${PYTHON_VERSION##*.}-linux_x86_64.whl ]; then source activate && conda activate neuralchat && pip install /torchvision-0.15.2+cpu-cp3${PYTHON_VERSION##*.}-cp3${PYTHON_VERSION##*.}-linux_x86_64.whl; fi
+RUN if [ -f /torch-2.1.0+cpu-cp3${PYTHON_VERSION##*.}-cp3${PYTHON_VERSION##*.}-linux_x86_64.whl ]; then source activate && conda activate neuralchat && pip install /torch-2.1.0+cpu-cp3${PYTHON_VERSION##*.}-cp3${PYTHON_VERSION##*.}-linux_x86_64.whl; fi
+RUN if [ -f /torchaudio-2.1.0+cpu-cp3${PYTHON_VERSION##*.}-cp3${PYTHON_VERSION##*.}-linux_x86_64.whl ]; then source activate && conda activate neuralchat && pip install /torchaudio-2.1.0+cpu-cp3${PYTHON_VERSION##*.}-cp3${PYTHON_VERSION##*.}-linux_x86_64.whl; fi
+RUN if [ -f /torchvision-0.16.0+cpu-cp3${PYTHON_VERSION##*.}-cp3${PYTHON_VERSION##*.}-linux_x86_64.whl ]; then source activate && conda activate neuralchat && pip install /torchvision-0.16.0+cpu-cp3${PYTHON_VERSION##*.}-cp3${PYTHON_VERSION##*.}-linux_x86_64.whl; fi
 
-RUN source activate && conda activate neuralchat && pip install oneccl_bind_pt==2.0.0 -f https://developer.intel.com/ipex-whl-stable-cpu && \
+RUN source activate && conda activate neuralchat && pip install oneccl_bind_pt==2.1.0 -f https://developer.intel.com/ipex-whl-stable-cpu && \
     cd /intel-extension-for-transformers && pip install -r requirements.txt && pip install -v . && \
     cd ./intel_extension_for_transformers/neural_chat/examples/instruction_tuning && pip install -r requirements.txt && \
     cd /intel-extension-for-transformers/intel_extension_for_transformers/neural_chat && pip install -r requirements_cpu.txt && \
diff --git a/intel_extension_for_transformers/neural_chat/docker/finetuning/Dockerfile b/intel_extension_for_transformers/neural_chat/docker/finetuning/Dockerfile
index bd33a2f9fd3..ade85a371f9 100644
--- a/intel_extension_for_transformers/neural_chat/docker/finetuning/Dockerfile
+++ b/intel_extension_for_transformers/neural_chat/docker/finetuning/Dockerfile
@@ -63,7 +63,7 @@ RUN conda init bash && \
     echo "conda activate chatbot-finetuning" >> ~/.bashrc && \
     source ~/.bashrc
 
-RUN source activate && conda activate chatbot-finetuning && pip install oneccl_bind_pt==2.0.0 -f https://developer.intel.com/ipex-whl-stable-cpu && \
+RUN source activate && conda activate chatbot-finetuning && pip install oneccl_bind_pt==2.1.0 -f https://developer.intel.com/ipex-whl-stable-cpu && \
    pip install datasets torch accelerate SentencePiece evaluate nltk rouge_score protobuf==3.20.1 tokenizers einops && \
    git clone https://github.com/huggingface/peft.git && cd peft && python setup.py install && \
    cd /itrex && pip install -v . && \
diff --git a/intel_extension_for_transformers/neural_chat/docs/notebooks/multi_node_finetuning_on_spr.ipynb b/intel_extension_for_transformers/neural_chat/docs/notebooks/multi_node_finetuning_on_spr.ipynb
index 788f9145d89..24971ab642c 100644
--- a/intel_extension_for_transformers/neural_chat/docs/notebooks/multi_node_finetuning_on_spr.ipynb
+++ b/intel_extension_for_transformers/neural_chat/docs/notebooks/multi_node_finetuning_on_spr.ipynb
@@ -23,7 +23,7 @@
     "```bash\n",
     "pip install -r requirements.txt\n",
     "# To use ccl as the distributed backend in distributed training on CPU requires to install below requirement.\n",
-    "python -m pip install oneccl_bind_pt==2.0.0 -f https://developer.intel.com/ipex-whl-stable-cpu\n",
+    "python -m pip install oneccl_bind_pt==2.1.0 -f https://developer.intel.com/ipex-whl-stable-cpu\n",
     "```\n",
     "\n",
     "Then, follow the [hugginface guide](https://huggingface.co/docs/transformers/perf_train_cpu_many) to install Intel® oneCCL Bindings for PyTorch, IPEX\n",
diff --git a/intel_extension_for_transformers/neural_chat/examples/instruction_tuning/README.md b/intel_extension_for_transformers/neural_chat/examples/instruction_tuning/README.md
index d25f15b1a38..200c39843e5 100644
--- a/intel_extension_for_transformers/neural_chat/examples/instruction_tuning/README.md
+++ b/intel_extension_for_transformers/neural_chat/examples/instruction_tuning/README.md
@@ -18,7 +18,7 @@ Recommend python 3.9 or higher version.
 ```shell
 pip install -r requirements.txt
 # To use ccl as the distributed backend in distributed training on CPU requires to install below requirement.
-python -m pip install oneccl_bind_pt==2.0.0 -f https://developer.intel.com/ipex-whl-stable-cpu
+python -m pip install oneccl_bind_pt==2.1.0 -f https://developer.intel.com/ipex-whl-stable-cpu
 ```
 
 ## 2. Prepare the Model
diff --git a/intel_extension_for_transformers/neural_chat/examples/instruction_tuning/requirements.txt b/intel_extension_for_transformers/neural_chat/examples/instruction_tuning/requirements.txt
index 6dbdb1ab8bd..4af5e860246 100644
--- a/intel_extension_for_transformers/neural_chat/examples/instruction_tuning/requirements.txt
+++ b/intel_extension_for_transformers/neural_chat/examples/instruction_tuning/requirements.txt
@@ -1,5 +1,5 @@
 datasets
-torch==2.0.1
+torch==2.1.0
 transformers>=4.32.0
 sentencepiece
 peft
diff --git a/intel_extension_for_transformers/neural_chat/requirements.txt b/intel_extension_for_transformers/neural_chat/requirements.txt
index 0651e6ee2d0..86ebb418392 100644
--- a/intel_extension_for_transformers/neural_chat/requirements.txt
+++ b/intel_extension_for_transformers/neural_chat/requirements.txt
@@ -1,8 +1,8 @@
 transformers>=4.32.0
 peft
 fschat
-torch==2.0.1
-torchaudio==2.0.2
+torch==2.1.0
+torchaudio==2.1.0
 intel_extension_for_pytorch
 num2words
 speechbrain
diff --git a/intel_extension_for_transformers/neural_chat/requirements_cpu.txt b/intel_extension_for_transformers/neural_chat/requirements_cpu.txt
index 4ddfa0a21a0..90e21457a08 100644
--- a/intel_extension_for_transformers/neural_chat/requirements_cpu.txt
+++ b/intel_extension_for_transformers/neural_chat/requirements_cpu.txt
@@ -1,7 +1,7 @@
 transformers>=4.32.0
 peft
 fschat
-intel_extension_for_pytorch==2.0.100
+intel_extension_for_pytorch==2.1.0
 num2words
 speechbrain
 paddlepaddle
@@ -37,7 +37,7 @@ tiktoken==0.4.0
 lm_eval
 transformers_stream_generator==0.0.4
 --extra-index-url https://download.pytorch.org/whl/cpu
-torch==2.0.1
-torchaudio==2.0.2
+torch==2.1.0
+torchaudio==2.1.0
 spacy
 neural-compressor
\ No newline at end of file
diff --git a/intel_extension_for_transformers/neural_chat/requirements_pc.txt b/intel_extension_for_transformers/neural_chat/requirements_pc.txt
index dfe67fb69bd..4d2e292401c 100644
--- a/intel_extension_for_transformers/neural_chat/requirements_pc.txt
+++ b/intel_extension_for_transformers/neural_chat/requirements_pc.txt
@@ -35,7 +35,7 @@ numpy==1.23.5
 tiktoken==0.4.0
 lm_eval
 --extra-index-url https://download.pytorch.org/whl/cpu
-torch==2.0.1
-torchaudio==2.0.2
+torch==2.1.0
+torchaudio==2.1.0
 spacy
 neural-compressor
\ No newline at end of file
diff --git a/intel_extension_for_transformers/neural_chat/tests/requirements.txt b/intel_extension_for_transformers/neural_chat/tests/requirements.txt
index 096d9d93647..861d1469ab4 100644
--- a/intel_extension_for_transformers/neural_chat/tests/requirements.txt
+++ b/intel_extension_for_transformers/neural_chat/tests/requirements.txt
@@ -1,8 +1,8 @@
 transformers>=4.32.0
 peft
 fschat
-torch==2.0.1
-torchaudio==2.0.2
+torch==2.1.0
+torchaudio==2.1.0
 intel_extension_for_pytorch
 num2words
 speechbrain
diff --git a/intel_extension_for_transformers/neural_chat/ui/textbot/requirements.txt b/intel_extension_for_transformers/neural_chat/ui/textbot/requirements.txt
index e7c835c0e87..49e4e87b9c4 100644
--- a/intel_extension_for_transformers/neural_chat/ui/textbot/requirements.txt
+++ b/intel_extension_for_transformers/neural_chat/ui/textbot/requirements.txt
@@ -1,5 +1,5 @@
 pip
-torch==2.0.1
+torch==2.1.0
 diffusers==0.8.1
 transformers
 requests
diff --git a/requirements.txt b/requirements.txt
index 13d24a2bac6..b2b10389d7e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,5 +4,5 @@ py-cpuinfo
 setuptools>=65
 setuptools_scm[toml]>=6.2
 --extra-index-url https://download.pytorch.org/whl/cpu
-torch==2.0.1+cpu
+torch==2.1.0+cpu
 accelerate
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 65577165dbd..bc416259e3f 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -6,7 +6,7 @@ neural-compressor
 onnx>=1.10
 onnxruntime
 --find-links https://download.pytorch.org/cpu/whl/torch_stable.html
-torch==2.0.1
+torch==2.1.0
 transformers
 intel-tensorflow==2.12.0
 torchprofile
diff --git a/workflows/chatbot/fine_tuning/README.md b/workflows/chatbot/fine_tuning/README.md
index 6bd52568499..28980dbbb6b 100644
--- a/workflows/chatbot/fine_tuning/README.md
+++ b/workflows/chatbot/fine_tuning/README.md
@@ -18,7 +18,7 @@ Recommend python 3.9 or higher version.
 ```shell
 pip install -r requirements.txt
 # To use ccl as the distributed backend in distributed training on CPU requires to install below requirement.
-python -m pip install oneccl_bind_pt==2.0.0 -f https://developer.intel.com/ipex-whl-stable-cpu
+python -m pip install oneccl_bind_pt==2.1.0 -f https://developer.intel.com/ipex-whl-stable-cpu
 ```
 
 ## 2. Prepare the Model
diff --git a/workflows/hf_finetuning_and_inference_nlp/requirements.txt b/workflows/hf_finetuning_and_inference_nlp/requirements.txt
index 38a57af4ae8..0221c42098d 100644
--- a/workflows/hf_finetuning_and_inference_nlp/requirements.txt
+++ b/workflows/hf_finetuning_and_inference_nlp/requirements.txt
@@ -2,7 +2,7 @@ transformers==4.31.0
 datasets==2.11.0
 neural-compressor==2.1
 --extra-index-url https://download.pytorch.org/whl/cpu
-torch==2.0.1
+torch==2.1.0
 intel_extension_for_pytorch==1.13.100
 intel-extension-for-transformers==1.0.0
 accelerate==0.21.0
\ No newline at end of file

From 5993a27eeff33230e34a41ead1765ec3ddfb164a Mon Sep 17 00:00:00 2001
From: Wenxin Zhang
Date: Thu, 19 Oct 2023 18:03:27 +0800
Subject: [PATCH 5/5] run PR test only

Signed-off-by: Wenxin Zhang
---
 .github/workflows/unit-test-neuralchat.yml | 4 ++--
 .github/workflows/unit-test-optimize.yml | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/unit-test-neuralchat.yml b/.github/workflows/unit-test-neuralchat.yml
index 63956e2f903..a5f51315c2a 100644
--- a/.github/workflows/unit-test-neuralchat.yml
+++ b/.github/workflows/unit-test-neuralchat.yml
@@ -36,8 +36,8 @@ jobs:
       include:
         - test_branch: ${{ github.ref }}
           test_name: "PR-test"
-        - test_branch: "main"
-          test_name: "baseline"
+        #- test_branch: "main"
+        #  test_name: "baseline"
     steps:
       - name: podman Clean Up
         run: |
diff --git a/.github/workflows/unit-test-optimize.yml b/.github/workflows/unit-test-optimize.yml
index cee522c0819..bc7b3eff4e2 100644
--- a/.github/workflows/unit-test-optimize.yml
+++ b/.github/workflows/unit-test-optimize.yml
@@ -34,8 +34,8 @@ jobs:
       include:
         - test_branch: ${{ github.ref }}
           test_name: "PR-test"
-        - test_branch: "main"
-          test_name: "baseline"
+        #- test_branch: "main"
+        #  test_name: "baseline"
     steps:
      - name: Docker Clean Up
        run: |
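
---
Note: to sanity-check the reduced evaluation settings from PATCH 1/5 outside the unittest harness, the evaluate entry point can be called directly. The sketch below simply mirrors the updated test_evaluate_for_CausalLM; it assumes intel_extension_for_transformers and lm_eval are installed, and the 0.6 accuracy only holds for this tiny-random checkpoint with limit=5.

# Minimal sketch mirroring the updated test_evaluate_for_CausalLM (PATCH 1/5).
# Assumes intel_extension_for_transformers and lm_eval are installed.
from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate

results = evaluate(
    model="hf-causal",
    model_args='pretrained="hf-internal-testing/tiny-random-gptj",tokenizer="hf-internal-testing/tiny-random-gptj",dtype=float32',
    tasks=["piqa"],
    limit=5,  # reduced from 20 to shorten CI evaluation time
)
print(results["results"]["piqa"]["acc"])  # the updated test asserts 0.6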