diff --git a/.github/workflows/docker/compose/reranks-compose-cd.yaml b/.github/workflows/docker/compose/reranks-compose-cd.yaml index 3e5e7caabf..1f468f8e53 100644 --- a/.github/workflows/docker/compose/reranks-compose-cd.yaml +++ b/.github/workflows/docker/compose/reranks-compose-cd.yaml @@ -14,3 +14,11 @@ services: build: dockerfile: comps/reranks/mosec/langchain/Dockerfile image: ${REGISTRY:-opea}/reranking-langchain-mosec:${TAG:-latest} + reranking-mosec-neural-speed: + build: + dockerfile: comps/reranks/neural-speed/docker/Dockerfile + image: ${REGISTRY:-opea}/reranking-mosec-neural-speed:${TAG:-latest} + reranking-mosec-neural-speed-endpoint: + build: + dockerfile: comps/reranks/neural-speed/neuralspeed-docker/Dockerfile + image: ${REGISTRY:-opea}/reranking-mosec-neural-speed-endpoint:${TAG:-latest} diff --git a/.github/workflows/pr-dockerfile-path-scan.yaml b/.github/workflows/pr-dockerfile-path-scan.yaml index 12f1b87087..ee41e34b4c 100644 --- a/.github/workflows/pr-dockerfile-path-scan.yaml +++ b/.github/workflows/pr-dockerfile-path-scan.yaml @@ -51,6 +51,7 @@ jobs: fi - name: Check for changed Dockerfile paths in readme + if: always() run: | set -e shopt -s globstar @@ -75,6 +76,37 @@ jobs: exit 1 fi + - name: Check new Dockerfile in compose yaml + if: always() + run: | + set -xe + shopt -s globstar + cd ${{github.workspace}} + no_add="FALSE" + merged_commit=$(git log -1 --format='%H') + changed_files="$(git diff --name-status --diff-filter=A ${{ github.event.pull_request.base.sha }} ${merged_commit} -- '**/Dockerfile**' | cut -f2)" + changed_yamls="$(git diff --name-status --diff-filter=AM ${{ github.event.pull_request.base.sha }} ${merged_commit} -- '**/*.yaml**' | cut -f2)" + if [ -n "$changed_files" ]; then + for file in $changed_files; do + service=$(echo "$file" | awk -F '/' '{print $2}') + if find "${{github.workspace}}/.github/workflows/docker/compose/" -name "*$service*" |grep -q .; then + if [ -n "$changed_files" ] && grep -q $service'-compose-cd.yaml' <<< "$changed_yamls"; then + echo "The $file has been added to the ${{github.workspace}}/.github/workflows/docker/compose/"$service"-compose-cd.yaml." + else + echo "Please check if the added $file is included in the yaml under path ${{github.workspace}}/.github/workflows/docker/compose/"$service"-compose-cd.yaml." + no_add="TRUE" + fi + else + echo "Please create a new compose file named "$service"-compose-cd.yaml in ${{github.workspace}}/.github/workflows/docker/compose/ for $file and fill it in." + no_add="TRUE" + fi + done + fi + + if [[ "$no_add" == "TRUE" ]]; then + exit 1 + fi + Dockerfile-path-change-detection-in-GenAIExamples: runs-on: ubuntu-latest steps: @@ -100,7 +132,7 @@ jobs: is_use="FALSE" used_files="" merged_commit=$(git log -1 --format='%H') - changed_files="$(git diff --name-status --diff-filter=DR ${{ github.event.pull_request.base.sha }} ${merged_commit} -- '**/Dockerfile' | cut -f2)" + changed_files="$(git diff --name-status --diff-filter=DR ${{ github.event.pull_request.base.sha }} ${merged_commit} -- '**/Dockerfile**' | cut -f2)" if [ -n "$changed_files" ]; then for file in $changed_files; do matching_files=$(grep -rl "$file" ../GenAIExamples/**/*.md) @@ -119,34 +151,3 @@ jobs: echo "Please modify the corresponding README in GenAIExamples repo and ask suyue.chen@intel.com for final confirmation." 
exit 1 fi - - Dockerfile-addition-detection-in-GenAIComps: - runs-on: ubuntu-latest - steps: - - name: Clean Up Working Directory - run: sudo rm -rf ${{github.workspace}}/* - - - name: Checkout code - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Check if the Dockerfile has been added - run: | - set -e - shopt -s globstar - cd ${{github.workspace}} - is_use="FALSE" - used_files="" - merged_commit=$(git log -1 --format='%H') - changed_files="$(git diff --name-status --diff-filter=A ${{ github.event.pull_request.base.sha }} ${merged_commit} -- '**/Dockerfile**' | cut -f2)" - if [ -n "$changed_files" ]; then - for file in $changed_files; do - if find "${{github.workspace}}/.github/workflows/docker/compose/" -name "*$(echo "$file" | awk -F '/' '{print $2}')*" |grep -q .; then - echo "Please check if the added $file is included in the yaml under path ${{github.workspace}}/.github/workflows/docker/compose/." - else - echo "Please create a new compose file named service_name-compose-cd.yaml in ${{github.workspace}}/.github/workflows/docker/compose/ for $file and fill it in." - fi - done - exit 1 - fi diff --git a/.github/workflows/pr-microservice-test.yml b/.github/workflows/pr-microservice-test.yml index 00cf8b03af..a77ce7a5de 100644 --- a/.github/workflows/pr-microservice-test.yml +++ b/.github/workflows/pr-microservice-test.yml @@ -47,6 +47,7 @@ jobs: GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }} GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }} PINECONE_KEY: ${{ secrets.PINECONE_KEY }} + PREDICTIONGUARD_API_KEY: ${{ secrets.PREDICTIONGUARD_API_KEY }} service: ${{ matrix.service }} hardware: ${{ matrix.hardware }} run: | diff --git a/README.md b/README.md index e1b48a08ce..ecaf6c35d6 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ The initially supported `Microservices` are described in the below table. More ` Embedding LangChain/LlamaIndex - BAAI/bge-large-en-v1.5 + BAAI/bge-base-en-v1.5 TEI-Gaudi Gaudi2 Embedding on Gaudi2 @@ -76,7 +76,7 @@ The initially supported `Microservices` are described in the below table. More ` Reranking LangChain/LlamaIndex - BAAI/bge-reranker-large + BAAI/bge-reranker-base TEI-Gaudi Gaudi2 Reranking on Gaudi2 diff --git a/comps/dataprep/redis/README.md b/comps/dataprep/redis/README.md index 8f3ff396c5..ed8beb5edf 100644 --- a/comps/dataprep/redis/README.md +++ b/comps/dataprep/redis/README.md @@ -49,8 +49,8 @@ First, you need to start a TEI service. 
```bash your_port=6006 -model="BAAI/bge-large-en-v1.5" -docker run -p $your_port:80 -v ./data:/data --name tei_server -e http_proxy=$http_proxy -e https_proxy=$https_proxy --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.2 --model-id $model +model="BAAI/bge-base-en-v1.5" +docker run -p $your_port:80 -v ./data:/data --name tei_server -e http_proxy=$http_proxy -e https_proxy=$https_proxy --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $model ``` Then you need to test your TEI service using the following commands: diff --git a/comps/dataprep/redis/langchain/config.py b/comps/dataprep/redis/langchain/config.py index 75715912c9..2d722a84a6 100644 --- a/comps/dataprep/redis/langchain/config.py +++ b/comps/dataprep/redis/langchain/config.py @@ -5,7 +5,7 @@ # Embedding model -EMBED_MODEL = os.getenv("EMBED_MODEL", "BAAI/bge-large-en-v1.5") +EMBED_MODEL = os.getenv("EMBED_MODEL", "BAAI/bge-base-en-v1.5") # Redis Connection Information REDIS_HOST = os.getenv("REDIS_HOST", "localhost") diff --git a/comps/dataprep/utils.py b/comps/dataprep/utils.py index 571d5e8a47..f48d971574 100644 --- a/comps/dataprep/utils.py +++ b/comps/dataprep/utils.py @@ -285,6 +285,16 @@ def load_json(json_path): return content_list +def load_jsonl(jsonl_path): + """Load and process jsonl file.""" + content_list = [] + with open(jsonl_path, "r") as file: + for line in file: + json_obj = json.loads(line) + content_list.append(json_obj) + return content_list + + def load_yaml(yaml_path): """Load and process yaml file.""" with open(yaml_path, "r") as file: @@ -351,8 +361,10 @@ def document_loader(doc_path): return load_md(doc_path) elif doc_path.endswith(".xml"): return load_xml(doc_path) - elif doc_path.endswith(".json") or doc_path.endswith(".jsonl"): + elif doc_path.endswith(".json"): return load_json(doc_path) + elif doc_path.endswith(".jsonl"): + return load_jsonl(doc_path) elif doc_path.endswith(".yaml"): return load_yaml(doc_path) elif doc_path.endswith(".xlsx") or doc_path.endswith(".xls"): diff --git a/comps/embeddings/tei/langchain/local_embedding.py b/comps/embeddings/tei/langchain/local_embedding.py index 32f8944a98..6a0a1a630f 100644 --- a/comps/embeddings/tei/langchain/local_embedding.py +++ b/comps/embeddings/tei/langchain/local_embedding.py @@ -40,5 +40,5 @@ def embedding(input: TextDoc) -> EmbedDoc: if __name__ == "__main__": - embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-large-en-v1.5") + embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-base-en-v1.5") opea_microservices["opea_service@local_embedding"].start() diff --git a/comps/embeddings/tei/llama_index/embedding_tei.py b/comps/embeddings/tei/llama_index/embedding_tei.py index cf14f7790b..943bd75350 100644 --- a/comps/embeddings/tei/llama_index/embedding_tei.py +++ b/comps/embeddings/tei/llama_index/embedding_tei.py @@ -31,7 +31,7 @@ def embedding(input: TextDoc) -> EmbedDoc: if __name__ == "__main__": - tei_embedding_model_name = os.getenv("TEI_EMBEDDING_MODEL_NAME", "BAAI/bge-large-en-v1.5") + tei_embedding_model_name = os.getenv("TEI_EMBEDDING_MODEL_NAME", "BAAI/bge-base-en-v1.5") tei_embedding_endpoint = os.getenv("TEI_EMBEDDING_ENDPOINT", "http://localhost:8090") embeddings = TextEmbeddingsInference(model_name=tei_embedding_model_name, base_url=tei_embedding_endpoint) logger.info("TEI Gaudi Embedding initialized.") diff --git a/comps/embeddings/tei/llama_index/local_embedding.py b/comps/embeddings/tei/llama_index/local_embedding.py index 143d7bb07c..17ee6e89a8 100644 --- 
a/comps/embeddings/tei/llama_index/local_embedding.py +++ b/comps/embeddings/tei/llama_index/local_embedding.py @@ -31,5 +31,5 @@ def embedding(input: TextDoc) -> EmbedDoc: if __name__ == "__main__": - embeddings = HuggingFaceInferenceAPIEmbedding(model_name="BAAI/bge-large-en-v1.5") + embeddings = HuggingFaceInferenceAPIEmbedding(model_name="BAAI/bge-base-en-v1.5") opea_microservices["opea_service@local_embedding"].start() diff --git a/comps/finetuning/README.md b/comps/finetuning/README.md index 82fc29b496..21fd585d60 100644 --- a/comps/finetuning/README.md +++ b/comps/finetuning/README.md @@ -86,7 +86,7 @@ docker run --runtime=habana -e HABANA_VISIBLE_DEVICES=all -p 8015:8015 -e OMPI_M ## 🚀3. Consume Finetuning Service -## 3.1 Upload a training file +### 3.1 Upload a training file Download a training file, such as `alpaca_data.json` for instruction tuning and upload it to the server with below command, this file can be downloaded in [here](https://github.com/tatsu-lab/stanford_alpaca/blob/main/alpaca_data.json): @@ -97,9 +97,9 @@ curl http://${your_ip}:8015/v1/files -X POST -H "Content-Type: multipart/form-da For reranking and embedding models finetuning, the training file [toy_finetune_data.jsonl](https://github.com/FlagOpen/FlagEmbedding/blob/master/examples/finetune/toy_finetune_data.jsonl) is an toy example. -## 3.2 Create fine-tuning job +### 3.2 Create fine-tuning job -### 3.2.1 Instruction Tuning +#### 3.2.1 Instruction Tuning After a training file like `alpaca_data.json` is uploaded, use the following command to launch a finetuning job using `meta-llama/Llama-2-7b-chat-hf` as base model: @@ -114,7 +114,7 @@ curl http://${your_ip}:8015/v1/fine_tuning/jobs \ }' ``` -### 3.2.2 Reranking Model Training +#### 3.2.2 Reranking Model Training Use the following command to launch a finetuning job for reranking model finetuning, such as `BAAI/bge-reranker-large`: @@ -133,7 +133,7 @@ curl http://${your_ip}:8015/v1/fine_tuning/jobs \ }' ``` -### 3.2.3 Embedding Model Training +#### 3.2.3 Embedding Model Training Use the following command to launch a finetuning job for embedding model finetuning, such as `BAAI/bge-base-en-v1.5`: @@ -173,7 +173,33 @@ curl http://${your_ip}:8015/v1/fine_tuning/jobs \ ``` -## 3.3 Manage fine-tuning job +#### 3.2.4 LLM Pretraining + +Use the following command to launch a job for LLM pretraining, such as `meta-llama/Llama-2-7b-hf`: + +```bash +# create a finetuning job +curl http://${your_ip}:8015/v1/fine_tuning/jobs \ + -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "training_file": "test_data.json", + "model": "meta-llama/Llama-2-7b-hf", + "General":{ + "task":"pretraining", + "lora_config":null + } + }' +``` + +Below is an example for the format of the pretraining dataset: + +```json +{"text": "A girl with a blue tank top sitting watching three dogs."} +{"text": "A boy with a blue tank top sitting watching three dogs."} +``` + +### 3.3 Manage fine-tuning job Below commands show how to list finetuning jobs, retrieve a finetuning job, cancel a finetuning job and list checkpoints of a finetuning job. @@ -191,6 +217,10 @@ curl http://localhost:8015/v1/fine_tuning/jobs/cancel -X POST -H "Content-Type: curl http://${your_ip}:8015/v1/finetune/list_checkpoints -X POST -H "Content-Type: application/json" -d '{"fine_tuning_job_id": ${fine_tuning_job_id}}' ``` +### 3.4 Leverage fine-tuned model + +After fine-tuning job is done, fine-tuned model can be chosen from listed checkpoints, then the fine-tuned model can be used in other microservices. 
For example, a fine-tuned reranking model can be used in the [reranks](../reranks/README.md) microservice by assigning its path to the environment variable `RERANK_MODEL_ID`, a fine-tuned embedding model can be used in the [embeddings](../embeddings/README.md) microservice by assigning its path to the environment variable `model`, and LLMs after instruction tuning can be used in the [llms](../llms/README.md) microservice by assigning its path to the environment variable `your_hf_llm_model`. + ## 🚀4. Descriptions for Finetuning parameters We utilize [OpenAI finetuning parameters](https://platform.openai.com/docs/api-reference/fine-tuning) and extend it with more customizable parameters, see the definitions at [finetune_config](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/finetune_config.py). diff --git a/comps/finetuning/finetune_config.py b/comps/finetuning/finetune_config.py index 3accabfb39..5473cd9aa2 100644 --- a/comps/finetuning/finetune_config.py +++ b/comps/finetuning/finetune_config.py @@ -16,6 +16,7 @@ DEVICE_CPU = "cpu" DEVICE_HPU = "hpu" DEVICE_GPU = "gpu" +DEVICE_CUDA = "cuda" ACCELERATE_STRATEGY_DDP = "DDP" ACCELERATE_STRATEGY_FSDP = "FSDP" @@ -57,7 +58,7 @@ def check_report_to(cls, v: str): @validator("task") def check_task(cls, v: str): - assert v in ["instruction_tuning", "rerank", "embedding"] + assert v in ["instruction_tuning", "pretraining", "rerank", "embedding"] return v @@ -136,7 +137,7 @@ class TrainingConfig(BaseModel): def check_device(cls, v: str): # will convert to lower case if v: - assert v.lower() in [DEVICE_CPU, DEVICE_GPU, DEVICE_HPU] + assert v.lower() in [DEVICE_CPU, DEVICE_GPU, DEVICE_HPU, DEVICE_CUDA] return v.lower() @validator("hpu_execution_mode") diff --git a/comps/finetuning/llm_on_ray/finetune/data_process.py b/comps/finetuning/llm_on_ray/finetune/data_process.py index d85bf2bfad..07b12d71e1 100644 --- a/comps/finetuning/llm_on_ray/finetune/data_process.py +++ b/comps/finetuning/llm_on_ray/finetune/data_process.py @@ -18,7 +18,7 @@ IGNORE_INDEX = -100 -class DataProcessor: +class InstructionDataProcessor: # We used the following prompts for fine-tuning the Alpaca model.
You can find reference doc form this URL(https://github.com/tatsu-lab/stanford_alpaca/blob/main/README.md#data-release) def __init__(self, config, tokenizer): self.tokenizer = tokenizer @@ -202,6 +202,39 @@ def tokenize(self, examples): return examples +class PretrainingDataProcessor: + def __init__(self, config, tokenizer): + self.tokenizer = tokenizer + self.max_length = self.max_seq_length = config["Dataset"].get("max_length", 512) + self.truncation = config["Dataset"].get("truncation", True) + self.padding = config["Dataset"].get("padding", True) + + def tokenize(self, examples): + keys = list(examples.data.keys()) + if len(keys) != 1 and "text" not in keys: + raise ValueError("Unsupported dataset format") + + key = keys[0] if len(keys) == 1 else "text" + examples["input_ids"] = [] + examples["labels"] = [] + examples["attention_mask"] = [] + for exp in examples[key]: + results = self.tokenizer( + exp, + padding=self.padding, + truncation=self.truncation, + return_tensors=None, + max_length=self.max_length, + ) + + input_ids = results["input_ids"] + labels = copy.deepcopy(input_ids) + examples["input_ids"].append(results["input_ids"]) + examples["labels"].append(labels) + examples["attention_mask"].append(results["attention_mask"]) + return examples + + class TrainDatasetForCE(Dataset): def __init__(self, dataset, args, tokenizer): self.dataset = dataset diff --git a/comps/finetuning/llm_on_ray/finetune/finetune.py b/comps/finetuning/llm_on_ray/finetune/finetune.py index c66cc7bbec..f44deedff2 100644 --- a/comps/finetuning/llm_on_ray/finetune/finetune.py +++ b/comps/finetuning/llm_on_ray/finetune/finetune.py @@ -28,9 +28,10 @@ from comps.finetuning.finetune_config import FinetuneConfig from comps.finetuning.llm_on_ray import common from comps.finetuning.llm_on_ray.finetune.data_process import ( - DataProcessor, EmbedCollator, GroupCollator, + InstructionDataProcessor, + PretrainingDataProcessor, TrainDatasetForCE, TrainDatasetForEmbedding, ) @@ -198,9 +199,9 @@ def tokenize_dataset(config: Dict, tokenizer, dataset): if task == "instruction_tuning": group = config["Dataset"].get("group", True) block_size = config["Dataset"].get("block_size", 512) - tokenizer.pad_token = tokenizer.eos_token + tokenizer.pad_token = tokenizer.eos_token if not tokenizer.pad_token else tokenizer.pad_token - processor = DataProcessor(config, tokenizer) + processor = InstructionDataProcessor(config, tokenizer) for key in dataset: prompts = processor.make_prompt(dataset[key]) @@ -221,6 +222,48 @@ def tokenize_dataset(config: Dict, tokenizer, dataset): desc="Tokenize dataset", ) + if group: + + def group_texts(examples): + # Concatenate all texts. + concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} + total_length = len(concatenated_examples[list(examples.keys())[0]]) + # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can + # customize this part to your needs. + if total_length >= block_size: + total_length = (total_length // block_size) * block_size + # Split by chunks of max_len. 
+ result = { + k: [t[i : i + block_size] for i in range(0, total_length, block_size)] + for k, t in concatenated_examples.items() + } + return result + + tokenized_dataset = tokenized_dataset.map( + group_texts, + batched=True, + load_from_cache_file=False, + desc=f"Grouping texts in chunks of {block_size}", + ) + + return tokenized_dataset + elif task == "pretraining": + group = True + block_size = config["Dataset"].get("block_size", 512) + tokenizer.pad_token = tokenizer.eos_token if not tokenizer.pad_token else tokenizer.pad_token + + processor = PretrainingDataProcessor(config, tokenizer) + + column_names = list(dataset["train"].features) + + tokenized_dataset = dataset.map( + processor.tokenize, + remove_columns=column_names, + batched=True, + load_from_cache_file=False, + desc="Tokenize dataset", + ) + if group: def group_texts(examples): @@ -258,7 +301,7 @@ def group_texts(examples): def prepare_data_collator(config: Dict, tokenizer): task = config["General"].get("task", "instruction_tuning") - if task == "instruction_tuning": + if task == "instruction_tuning" or task == "pretraining": return transformers.DataCollatorForLanguageModeling( tokenizer=tokenizer, mlm=False, return_tensors="pt", pad_to_multiple_of=8 ) @@ -280,10 +323,10 @@ def load_model(config: Dict): model_dtype = convert_dtype(config["Training"].get("mixed_precision", "no")) model_config = config["General"].get("config", {}) task = config["General"].get("task", "instruction_tuning") - if task == "instruction_tuning": + if task == "instruction_tuning" or task == "pretraining": model = transformers.AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=model_dtype, **model_config) lora_config = config["General"].get("lora_config", None) - if lora_config: + if lora_config and task != "pretraining": peft_config = LoraConfig(**lora_config) model = get_peft_model(model, peft_config) elif task == "rerank": @@ -326,7 +369,7 @@ def load_model(config: Dict): def get_trainer(config: Dict, model, tokenizer, tokenized_dataset, data_collator): device = config["Training"]["device"] - if device in ["cpu", "gpu"]: + if device in ["cpu", "gpu", "cuda"]: training_args = convert_to_training_args(TrainingArguments, config) trainer = Trainer( model=model, diff --git a/comps/reranks/mosec/langchain/dependency/Dockerfile b/comps/reranks/mosec/langchain/dependency/Dockerfile index 0875a2a79e..25dbeafece 100644 --- a/comps/reranks/mosec/langchain/dependency/Dockerfile +++ b/comps/reranks/mosec/langchain/dependency/Dockerfile @@ -18,7 +18,7 @@ RUN pip3 install intel-extension-for-pytorch==2.2.0 RUN pip3 install transformers sentence-transformers RUN pip3 install llmspec mosec -RUN cd /home/user/ && export HF_ENDPOINT=https://hf-mirror.com && huggingface-cli download --resume-download BAAI/bge-reranker-large --local-dir /home/user/bge-reranker-large +RUN cd /home/user/ && export HF_ENDPOINT=https://hf-mirror.com && huggingface-cli download --resume-download BAAI/bge-reranker-base --local-dir /home/user/bge-reranker-large USER user ENV EMB_MODEL="/home/user/bge-reranker-large/" diff --git a/comps/reranks/neural-speed/README.md b/comps/reranks/neural-speed/README.md new file mode 100644 index 0000000000..c1841e16aa --- /dev/null +++ b/comps/reranks/neural-speed/README.md @@ -0,0 +1,32 @@ +# build Mosec endpoint docker image + +``` +docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -t langchain-mosec:neuralspeed-reranks -f comps/reranks/neural-speed/neuralspeed-docker/Dockerfile . 
+``` + +# build Reranking microservice docker image + +``` +docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -t opea/reranking-langchain-mosec:neuralspeed -f comps/reranks/neural-speed/docker/Dockerfile . +``` + +Note: Please contact us to request model files before building images. + +# launch Mosec endpoint docker container + +``` +docker run -d --name="reranking-langchain-mosec-endpoint" -p 6001:8000 langchain-mosec:neuralspeed-reranks +``` + +# launch Reranking microservice docker container + +``` +export MOSEC_RERANKING_ENDPOINT=http://127.0.0.1:6001 +docker run -d --name="reranking-langchain-mosec-server" -e http_proxy=$http_proxy -e https_proxy=$https_proxy -p 6000:8000 --ipc=host -e MOSEC_RERANKING_ENDPOINT=$MOSEC_RERANKING_ENDPOINT opea/reranking-langchain-mosec:neuralspeed +``` + +# run client test + +``` +curl http://localhost:6000/v1/reranking -X POST -d '{"initial_query":"What is Deep Learning?", "retrieved_docs": [{"text":"Deep Learning is not..."}, {"text":"Deep learning is..."}]}' -H 'Content-Type: application/json' +``` diff --git a/comps/reranks/neural-speed/__init__.py b/comps/reranks/neural-speed/__init__.py new file mode 100644 index 0000000000..916f3a44b2 --- /dev/null +++ b/comps/reranks/neural-speed/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/comps/reranks/neural-speed/docker/Dockerfile b/comps/reranks/neural-speed/docker/Dockerfile new file mode 100644 index 0000000000..8ffed65ec9 --- /dev/null +++ b/comps/reranks/neural-speed/docker/Dockerfile @@ -0,0 +1,31 @@ + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +FROM langchain/langchain:latest + +RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missing \ + libgl1-mesa-glx \ + libjemalloc-dev \ + vim + +RUN useradd -m -s /bin/bash user && \ + mkdir -p /home/user && \ + chown -R user /home/user/ + +USER user + +COPY comps /home/user/comps + +RUN pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir -r /home/user/comps/reranks/neural-speed/requirements.txt + +RUN pip3 install llmspec mosec msgspec httpx requests +RUN pip3 install torch==2.2.2 --trusted-host download.pytorch.org --index-url https://download.pytorch.org/whl/cpu + +ENV PYTHONPATH=$PYTHONPATH:/home/user + +WORKDIR /home/user/comps/reranks/neural-speed + +ENTRYPOINT ["python", "reranking_neuralspeed_svc.py"] + diff --git a/comps/reranks/neural-speed/docker/docker_compose_embedding.yaml b/comps/reranks/neural-speed/docker/docker_compose_embedding.yaml new file mode 100644 index 0000000000..d5f59b4a07 --- /dev/null +++ b/comps/reranks/neural-speed/docker/docker_compose_embedding.yaml @@ -0,0 +1,22 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +version: "3.8" + +services: + reranking: + image: opea/reranking-langchain-mosec:neuralspeed + container_name: reranking-langchain-mosec-server + ports: + - "6000:8000" + ipc: host + environment: + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + MOSEC_RERANKING_ENDPOINT: ${MOSEC_RERANKING_ENDPOINT} + LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY} + restart: unless-stopped + +networks: + default: + driver: bridge diff --git a/comps/reranks/neural-speed/neuralspeed-docker/Dockerfile b/comps/reranks/neural-speed/neuralspeed-docker/Dockerfile new file mode 100644 index 0000000000..42dcbad8c3 --- /dev/null +++ b/comps/reranks/neural-speed/neuralspeed-docker/Dockerfile @@ -0,0 +1,27 @@ +# Copyright 
(C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +From ubuntu:22.04 +ARG DEBIAN_FRONTEND=noninteractive + +ENV GLIBC_TUNABLES glibc.cpu.x86_shstk=permissive + +COPY comps /root/comps +COPY neural_speed-0.1.dev45+g41ea0aa-cp310-cp310-linux_x86_64.whl /root/ +COPY bge-large-r-q8.bin /root/ +COPY libstdc++.so.6 /root/ + +RUN apt update && apt install -y python3 python3-pip +RUN pip3 install -r /root/comps/reranks/neural-speed/neuralspeed-docker/requirements.txt +RUN pip3 install llmspec mosec msgspec httpx requests +RUN pip3 install /root/neural_speed-0.1.dev45+g41ea0aa-cp310-cp310-linux_x86_64.whl + +RUN cd /root/ && export HF_ENDPOINT=https://hf-mirror.com && huggingface-cli download --resume-download BAAI/bge-reranker-large --local-dir /root/bge-reranker-large + + +ENV LD_PRELOAD=/root/libstdc++.so.6 + + +WORKDIR /root/comps/reranks/neural-speed/neuralspeed-docker + +CMD ["python3", "server.py"] diff --git a/comps/reranks/neural-speed/neuralspeed-docker/client.py b/comps/reranks/neural-speed/neuralspeed-docker/client.py new file mode 100644 index 0000000000..02017faaf2 --- /dev/null +++ b/comps/reranks/neural-speed/neuralspeed-docker/client.py @@ -0,0 +1,35 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os +from http import HTTPStatus + +import httpx +import msgspec +import requests + +req = { + "query": "talk is cheap, show me the code", + "docs": [ + "what a nice day", + "life is short, use python", + "early bird catches the worm", + ], +} + +httpx_response = httpx.post("http://127.0.0.1:8080/inference", content=msgspec.msgpack.encode(req)) + +requests_response = requests.post("http://127.0.0.1:8080/inference", data=msgspec.msgpack.encode(req)) + +MOSEC_RERANKING_ENDPOINT = os.environ.get("MOSEC_RERANKING_ENDPOINT", "http://127.0.0.1:8080") + +request_url = MOSEC_RERANKING_ENDPOINT + "/inference" +print(f"request_url = {request_url}") +resp_3 = requests.post(request_url, data=msgspec.msgpack.encode(req)) + +if httpx_response.status_code == HTTPStatus.OK and requests_response.status_code == HTTPStatus.OK: + print(f"OK: \n {msgspec.msgpack.decode(httpx_response.content)}") + print(f"OK: \n {msgspec.msgpack.decode(requests_response.content)}") + print(f"OK: \n {msgspec.msgpack.decode(resp_3.content)}") +else: + print(f"err[{httpx_response.status_code}] {httpx_response.text}") diff --git a/comps/reranks/neural-speed/neuralspeed-docker/client_multibatch.py b/comps/reranks/neural-speed/neuralspeed-docker/client_multibatch.py new file mode 100644 index 0000000000..09eee1dfbf --- /dev/null +++ b/comps/reranks/neural-speed/neuralspeed-docker/client_multibatch.py @@ -0,0 +1,45 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from http import HTTPStatus +from threading import Thread + +import httpx +import msgspec + +req = { + "query": "talk is cheap, show me the code", + "docs": [ + "what a nice day", + "life is short, use python", + "early bird catches the worm", + ], +} +reqs = [] +BATCH = 32 +for i in range(BATCH): + reqs.append(msgspec.msgpack.encode(req)) + + +def post_func(threadIdx): + resp = httpx.post("http://127.0.0.1:8080/inference", content=reqs[threadIdx]) + ret = f"thread {threadIdx} \n" + if resp.status_code == HTTPStatus.OK: + ret += f"OK: {msgspec.msgpack.decode(resp.content)['scores']}" + else: + ret += f"err[{resp.status_code}] {resp.text}" + print(ret) + + +threads = [] +for i in range(BATCH): + t = Thread( + target=post_func, + args=[ + i, + ], + ) + threads.append(t) + +for i 
in range(BATCH): + threads[i].start() diff --git a/comps/reranks/neural-speed/neuralspeed-docker/requirements.txt b/comps/reranks/neural-speed/neuralspeed-docker/requirements.txt new file mode 100644 index 0000000000..50dc540fcd --- /dev/null +++ b/comps/reranks/neural-speed/neuralspeed-docker/requirements.txt @@ -0,0 +1,16 @@ +--extra-index-url https://download.pytorch.org/whl/cpu +accelerate +cmake +datasets +huggingface_hub +matplotlib +numpy +peft +protobuf<3.20 +py-cpuinfo +sentencepiece +tiktoken +torch +transformers +transformers_stream_generator +zipfile38 diff --git a/comps/reranks/neural-speed/neuralspeed-docker/server.py b/comps/reranks/neural-speed/neuralspeed-docker/server.py new file mode 100644 index 0000000000..0176abcfbe --- /dev/null +++ b/comps/reranks/neural-speed/neuralspeed-docker/server.py @@ -0,0 +1,91 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os +import time +from typing import Any, List + +import numpy +from mosec import Server, Worker, get_logger +from mosec.mixin import TypedMsgPackMixin +from msgspec import Struct +from neural_speed import Model +from transformers import AutoModelForSequenceClassification, AutoTokenizer + +logger = get_logger() + +INFERENCE_BATCH_SIZE = 128 +INFERENCE_MAX_WAIT_TIME = 10 +INFERENCE_WORKER_NUM = 1 +INFERENCE_CONTEXT = 512 + +TorchModel = "/root/bge-reranker-large" +NS_Bin = "/root/bge-large-r-q8.bin" + +NS_Model = "bert" + + +class Request(Struct, kw_only=True): + query: str + docs: List[str] + + +class Response(Struct, kw_only=True): + scores: List[float] + + +class Inference(TypedMsgPackMixin, Worker): + + def __init__(self): + super().__init__() + self.tokenizer = AutoTokenizer.from_pretrained(TorchModel) + self.model = Model() + self.model.init_from_bin( + NS_Model, + NS_Bin, + batch_size=INFERENCE_BATCH_SIZE, + n_ctx=INFERENCE_CONTEXT + 2, + ) + + def forward(self, data: List[Request]) -> List[Response]: + batch = len(data) + ndoc = [] + inps = [] + for data in data: + inp = [[data.query, doc] for doc in data.docs] + inps.extend(inp) + ndoc.append(len(data.docs)) + outs = [] + for i in range(0, len(inps), INFERENCE_BATCH_SIZE): + inp_bs = inps[i : i + INFERENCE_BATCH_SIZE] + inputs = self.tokenizer( + inp_bs, padding=True, truncation=True, max_length=INFERENCE_CONTEXT, return_tensors="pt" + ) + st = time.time() + output = self.model( + **inputs, + reinit=True, + logits_all=True, + continuous_batching=False, + ignore_padding=True, + ) + logger.info(f"Total batch {batch} input shape {inputs.input_ids.shape} time {time.time()-st}") + outs.append(output) + ns_outputs = numpy.concatenate(outs, axis=0) + resps = [] + pos = 0 + for i in range(batch): + resp = Response(scores=ns_outputs[pos : pos + ndoc[i]].tolist()) + pos += ndoc[i] + resps.append(resp) + return resps + + +if __name__ == "__main__": + INFERENCE_BATCH_SIZE = int(os.environ.get("MAX_BATCH_SIZE", 128)) + INFERENCE_MAX_WAIT_TIME = int(os.environ.get("MAX_WAIT_TIME", 1)) + server = Server() + server.append_worker( + Inference, max_batch_size=INFERENCE_BATCH_SIZE, max_wait_time=INFERENCE_MAX_WAIT_TIME, num=INFERENCE_WORKER_NUM + ) + server.run() diff --git a/comps/reranks/neural-speed/requirements.txt b/comps/reranks/neural-speed/requirements.txt new file mode 100644 index 0000000000..9fa1a059ce --- /dev/null +++ b/comps/reranks/neural-speed/requirements.txt @@ -0,0 +1,11 @@ +docarray[full] +fastapi +langchain +langchain_community +openai +opentelemetry-api +opentelemetry-exporter-otlp +opentelemetry-sdk
+prometheus-fastapi-instrumentator +shortuuid +uvicorn diff --git a/comps/reranks/neural-speed/reranking_neuralspeed_svc.py b/comps/reranks/neural-speed/reranking_neuralspeed_svc.py new file mode 100644 index 0000000000..098378a527 --- /dev/null +++ b/comps/reranks/neural-speed/reranking_neuralspeed_svc.py @@ -0,0 +1,93 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import heapq +import json +import os +import re +import time +from typing import List, Optional, Union + +import httpx +import msgspec +import requests +import torch +from langchain_core.prompts import ChatPromptTemplate +from langsmith import traceable + +from comps import ( + CustomLogger, + LLMParamsDoc, + SearchedDoc, + ServiceType, + opea_microservices, + register_microservice, + register_statistics, + statistics_dict, +) +from comps.cores.proto.api_protocol import ( + ChatCompletionRequest, + RerankingRequest, + RerankingResponse, + RerankingResponseData, +) + + +@register_microservice( + name="opea_service@reranking_mosec", + service_type=ServiceType.RERANK, + endpoint="/v1/reranking", + host="0.0.0.0", + port=8000, + input_datatype=SearchedDoc, + output_datatype=LLMParamsDoc, +) +@traceable(run_type="reranking") +@register_statistics(names=["opea_service@reranking_mosec"]) +def reranking( + input: Union[SearchedDoc, RerankingRequest, ChatCompletionRequest] +) -> Union[LLMParamsDoc, RerankingResponse, ChatCompletionRequest]: + start = time.time() + reranking_results = [] + if input.retrieved_docs: + docs = [doc.text for doc in input.retrieved_docs] + url = mosec_reranking_endpoint + "/inference" + if isinstance(input, SearchedDoc): + query = input.initial_query + else: + # for RerankingRequest, ChatCompletionRequest + query = input.input + data = {"query": query, "docs": docs} + resp = requests.post(url, data=msgspec.msgpack.encode(data)) + response_list = msgspec.msgpack.decode(resp.content)["scores"] + response = torch.nn.functional.sigmoid(torch.tensor(response_list)) + length = len(response) + resp_list = response.tolist() + sorted_score = heapq.nlargest(length, resp_list) + sorted_score_index = map(resp_list.index, sorted_score) + + for i in range(input.top_n): + reranking_results.append( + {"text": input.retrieved_docs[list(sorted_score_index)[i]].text, "score": sorted_score[i]} + ) + + statistics_dict["opea_service@reranking_mosec"].append_latency(time.time() - start, None) + if isinstance(input, SearchedDoc): + return LLMParamsDoc(query=input.initial_query, documents=[doc["text"] for doc in reranking_results]) + else: + reranking_docs = [] + for doc in reranking_results: + reranking_docs.append(RerankingResponseData(text=doc["text"], score=doc["score"])) + if isinstance(input, RerankingRequest): + return RerankingResponse(reranked_docs=reranking_docs) + + if isinstance(input, ChatCompletionRequest): + input.reranked_docs = reranking_docs + input.documents = [doc["text"] for doc in reranking_results] + return input + + +if __name__ == "__main__": + mosec_reranking_endpoint = os.getenv("MOSEC_RERANKING_ENDPOINT", "http://localhost:8080") + print("NeuralSpeed Reranking Microservice Initialized.") + opea_microservices["opea_service@reranking_mosec"].start() diff --git a/comps/reranks/tei/README.md b/comps/reranks/tei/README.md index cf11c31fe8..a46673119b 100644 --- a/comps/reranks/tei/README.md +++ b/comps/reranks/tei/README.md @@ -16,7 +16,7 @@ pip install -r requirements.txt ```bash export HF_TOKEN=${your_hf_api_token} -export RERANK_MODEL_ID="BAAI/bge-reranker-large" +export 
RERANK_MODEL_ID="BAAI/bge-reranker-base" volume=$PWD/data docker run -d -p 6060:80 -v $volume:/data -e http_proxy=$http_proxy -e https_proxy=$https_proxy --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $RERANK_MODEL_ID --hf-api-token $HF_TOKEN ``` diff --git a/comps/reranks/tei/local_reranking.py b/comps/reranks/tei/local_reranking.py index 284cca7e6d..fca2e68e6f 100644 --- a/comps/reranks/tei/local_reranking.py +++ b/comps/reranks/tei/local_reranking.py @@ -41,5 +41,5 @@ def reranking(input: SearchedDoc) -> RerankedDoc: if __name__ == "__main__": - reranker_model = CrossEncoder(model_name="BAAI/bge-reranker-large", max_length=512) + reranker_model = CrossEncoder(model_name="BAAI/bge-reranker-base", max_length=512) opea_microservices["opea_service@local_reranking"].start() diff --git a/tests/embeddings/test_embeddings_mosec_langchain.sh b/tests/embeddings/test_embeddings_mosec_langchain.sh index 7e15ee5548..4140e70112 100644 --- a/tests/embeddings/test_embeddings_mosec_langchain.sh +++ b/tests/embeddings/test_embeddings_mosec_langchain.sh @@ -33,7 +33,7 @@ function build_docker_images() { function start_service() { mosec_endpoint=5001 - model="BAAI/bge-large-en-v1.5" + model="BAAI/bge-base-en-v1.5" unset http_proxy docker run -d --name="test-comps-embedding-langchain-mosec-endpoint" -p $mosec_endpoint:8000 opea/embedding-langchain-mosec-endpoint:comps export MOSEC_EMBEDDING_ENDPOINT="http://${ip_address}:${mosec_endpoint}" diff --git a/tests/embeddings/test_embeddings_tei_langchain.sh b/tests/embeddings/test_embeddings_tei_langchain.sh index f7d21da50f..4031343b25 100644 --- a/tests/embeddings/test_embeddings_tei_langchain.sh +++ b/tests/embeddings/test_embeddings_tei_langchain.sh @@ -20,8 +20,8 @@ function build_docker_images() { } function start_service() { - tei_endpoint=5001 - model="BAAI/bge-large-en-v1.5" + tei_endpoint=5001 + model="BAAI/bge-base-en-v1.5" unset http_proxy docker run -d --name="test-comps-embedding-tei-endpoint" -p $tei_endpoint:80 -v ./data:/data --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $model export TEI_EMBEDDING_ENDPOINT="http://${ip_address}:${tei_endpoint}" diff --git a/tests/embeddings/test_embeddings_tei_llama_index.sh b/tests/embeddings/test_embeddings_tei_llama_index.sh index d0df6bee80..46bbd150cb 100644 --- a/tests/embeddings/test_embeddings_tei_llama_index.sh +++ b/tests/embeddings/test_embeddings_tei_llama_index.sh @@ -22,7 +22,7 @@ function build_docker_images() { function start_service() { tei_endpoint=5001 - model="BAAI/bge-large-en-v1.5" + model="BAAI/bge-base-en-v1.5" docker run -d --name="test-comps-embedding-tei-llama-index-endpoint" -p $tei_endpoint:80 -v ./data:/data -e http_proxy=$http_proxy -e https_proxy=$https_proxy --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $model export TEI_EMBEDDING_ENDPOINT="http://${ip_address}:${tei_endpoint}" tei_service_port=5034 diff --git a/tests/reranks/test_reranks_mosec_langchain.sh b/tests/reranks/test_reranks_mosec_langchain.sh index cc78007f4e..7d0a1a288f 100644 --- a/tests/reranks/test_reranks_mosec_langchain.sh +++ b/tests/reranks/test_reranks_mosec_langchain.sh @@ -33,7 +33,7 @@ function build_docker_images() { function start_service() { mosec_endpoint=5006 - model="BAAI/bge-reranker-large" + model="BAAI/bge-reranker-base" unset http_proxy docker run -d --name="test-comps-reranking-langchain-mosec-endpoint" -p $mosec_endpoint:8000 opea/reranking-langchain-mosec-endpoint:comps export
MOSEC_RERANKING_ENDPOINT="http://${ip_address}:${mosec_endpoint}" diff --git a/tests/reranks/test_reranks_tei.sh b/tests/reranks/test_reranks_tei.sh index 97ccccfd7d..f28a0a1899 100644 --- a/tests/reranks/test_reranks_tei.sh +++ b/tests/reranks/test_reranks_tei.sh @@ -21,10 +21,10 @@ function start_service() { tei_endpoint=5006 # Remember to set HF_TOKEN before invoking this test! export HF_TOKEN=${HF_TOKEN} - model=BAAI/bge-reranker-large + model=BAAI/bge-reranker-base revision=refs/pr/4 volume=$PWD/data - docker run -d --name="test-comps-reranking-tei-endpoint" -p $tei_endpoint:80 -v $volume:/data --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.2 --model-id $model --revision $revision + docker run -d --name="test-comps-reranking-tei-endpoint" -p $tei_endpoint:80 -v $volume:/data --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 --model-id $model export TEI_RERANKING_ENDPOINT="http://${ip_address}:${tei_endpoint}" tei_service_port=5007 diff --git a/tests/test_finetuning_llm_pretraining.sh b/tests/test_finetuning_llm_pretraining.sh new file mode 100644 index 0000000000..69460fbc0f --- /dev/null +++ b/tests/test_finetuning_llm_pretraining.sh @@ -0,0 +1,118 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -x + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" +ip_address=$(hostname -I | awk '{print $1}') +finetuning_service_port=8015 +ray_port=8265 + +function build_docker_images() { + cd $WORKPATH + echo $(pwd) + docker build -t opea/finetuning:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy --build-arg HF_TOKEN=$HF_TOKEN -f comps/finetuning/docker/Dockerfile_cpu . + if [ $? -ne 0 ]; then + echo "opea/finetuning built fail" + exit 1 + else + echo "opea/finetuning built successful" + fi +} + +function start_service() { + export no_proxy="localhost,127.0.0.1,"${ip_address} + docker run -d --name="finetuning-server" -p $finetuning_service_port:$finetuning_service_port -p $ray_port:$ray_port --runtime=runc --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e no_proxy=$no_proxy opea/finetuning:latest + sleep 1m +} + +function validate_microservice() { + cd $LOG_PATH + export no_proxy="localhost,127.0.0.1,"${ip_address} + + # test /v1/dataprep upload file + URL="http://${ip_address}:$finetuning_service_port/v1/files" + cat <<EOF > test_data.json +{"text": "Five women walk along a beach wearing flip-flops."} +{"text": "A woman standing on a high cliff on one leg looking over a river."} +{"text": "Two woman are playing instruments; one a clarinet, the other a violin."} +{"text": "A girl with a blue tank top sitting watching three dogs."} +{"text": "A yellow dog running along a forest path."} +{"text": "It sets out essential activities in each phase along with critical factors related to those activities."} +EOF + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F 'file=@./test_data.json' -F purpose="fine-tune" -H 'Content-Type: multipart/form-data' "$URL") + HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') + RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') + SERVICE_NAME="finetuning-server - upload - file" + + # Parse the JSON response + purpose=$(echo "$RESPONSE_BODY" | jq -r '.purpose') + filename=$(echo "$RESPONSE_BODY" | jq -r '.filename') + + # Define expected values + expected_purpose="fine-tune" + expected_filename="test_data.json" + + if [ "$HTTP_STATUS" -ne "200" ]; then + echo "[ 
$SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" + docker logs finetuning-server >> ${LOG_PATH}/finetuning-server_upload_file.log + exit 1 + else + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + fi + # Check if the parsed values match the expected values + if [[ "$purpose" != "$expected_purpose" || "$filename" != "$expected_filename" ]]; then + echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY" + docker logs finetuning-server >> ${LOG_PATH}/finetuning-server_upload_file.log + exit 1 + else + echo "[ $SERVICE_NAME ] Content is as expected." + fi + + # test /v1/fine_tuning/jobs + URL="http://${ip_address}:$finetuning_service_port/v1/fine_tuning/jobs" + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -H 'Content-Type: application/json' -d '{"training_file": "test_data.json","model": "facebook/opt-125m","General":{"task":"pretraining","lora_config":null}}' "$URL") + HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') + RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') + SERVICE_NAME="finetuning-server - create finetuning job" + + if [ "$HTTP_STATUS" -ne "200" ]; then + echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" + docker logs finetuning-server >> ${LOG_PATH}/finetuning-server_create.log + exit 1 + else + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + fi + if [[ "$RESPONSE_BODY" != *'{"id":"ft-job'* ]]; then + echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY" + docker logs finetuning-server >> ${LOG_PATH}/finetuning-server_create.log + exit 1 + else + echo "[ $SERVICE_NAME ] Content is as expected." + fi + + sleep 3m +} + +function stop_docker() { + cid=$(docker ps -aq --filter "name=finetuning-server*") + if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid && sleep 1s; fi +} + +function main() { + + stop_docker + + build_docker_images + start_service + + validate_microservice + + stop_docker + echo y | docker system prune + +} + +main diff --git a/tests/test_finetuning_rerank.sh b/tests/test_finetuning_rerank.sh new file mode 100644 index 0000000000..fd594bf6ca --- /dev/null +++ b/tests/test_finetuning_rerank.sh @@ -0,0 +1,117 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -x + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" +ip_address=$(hostname -I | awk '{print $1}') +finetuning_service_port=8015 +ray_port=8265 + +function build_docker_images() { + cd $WORKPATH + echo $(pwd) + docker build -t opea/finetuning:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy --build-arg HF_TOKEN=$HF_TOKEN -f comps/finetuning/docker/Dockerfile_cpu . + if [ $? 
-ne 0 ]; then + echo "opea/finetuning built fail" + exit 1 + else + echo "opea/finetuning built successful" + fi +} + +function start_service() { + export no_proxy="localhost,127.0.0.1,"${ip_address} + docker run -d --name="finetuning-server" -p $finetuning_service_port:$finetuning_service_port -p $ray_port:$ray_port --runtime=runc --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e no_proxy=$no_proxy opea/finetuning:latest + sleep 1m +} + +function validate_microservice() { + cd $LOG_PATH + export no_proxy="localhost,127.0.0.1,"${ip_address} + + # test /v1/dataprep upload file + URL="http://${ip_address}:$finetuning_service_port/v1/files" + cat <<EOF > test_data.json +{"query": "Five women walk along a beach wearing flip-flops.", "pos": ["Some women with flip-flops on, are walking along the beach"], "neg": ["The 4 women are sitting on the beach.", "There was a reform in 1996.", "She's not going to court to clear her record.", "The man is talking about hawaii.", "A woman is standing outside.", "The battle was over. ", "A group of people plays volleyball."]} +{"query": "A woman standing on a high cliff on one leg looking over a river.", "pos": ["A woman is standing on a cliff."], "neg": ["A woman sits on a chair.", "George Bush told the Republicans there was no way he would let them even consider this foolish idea, against his top advisors advice.", "The family was falling apart.", "no one showed up to the meeting", "A boy is sitting outside playing in the sand.", "Ended as soon as I received the wire.", "A child is reading in her bedroom."]} +{"query": "Two woman are playing instruments; one a clarinet, the other a violin.", "pos": ["Some people are playing a tune."], "neg": ["Two women are playing a guitar and drums.", "A man is skiing down a mountain.", "The fatal dose was not taken when the murderer thought it would be.", "Person on bike", "The girl is standing, leaning against the archway.", "A group of women watch soap operas.", "No matter how old people get they never forget. "]} +{"query": "A girl with a blue tank top sitting watching three dogs.", "pos": ["A girl is wearing blue."], "neg": ["A girl is with three cats.", "The people are watching a funeral procession.", "The child is wearing black.", "Financing is an issue for us in public schools.", "Kids at a pool.", "It is calming to be assaulted.", "I face a serious problem at eighteen years old. "]} +{"query": "A yellow dog running along a forest path.", "pos": ["a dog is running"], "neg": ["a cat is running", "Steele did not keep her original story.", "The rule discourages people to pay their child support.", "A man in a vest sits in a car.", "Person in black clothing, with white bandanna and sunglasses waits at a bus stop.", "Neither the Globe or Mail had comments on the current state of Canada's road system. 
", "The Spring Creek facility is old and outdated."]} +{"query": "It sets out essential activities in each phase along with critical factors related to those activities.", "pos": ["Critical factors for essential activities are set out."], "neg": ["It lays out critical activities but makes no provision for critical factors related to those activities.", "People are assembled in protest.", "The state would prefer for you to do that.", "A girl sits beside a boy.", "Two males are performing.", "Nobody is jumping", "Conrad was being plotted against, to be hit on the head."]} +EOF + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F 'file=@./test_data.json' -F purpose="fine-tune" -H 'Content-Type: multipart/form-data' "$URL") + HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') + RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') + SERVICE_NAME="finetuning-server - upload - file" + + # Parse the JSON response + purpose=$(echo "$RESPONSE_BODY" | jq -r '.purpose') + filename=$(echo "$RESPONSE_BODY" | jq -r '.filename') + + # Define expected values + expected_purpose="fine-tune" + expected_filename="test_data.json" + + if [ "$HTTP_STATUS" -ne "200" ]; then + echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" + docker logs finetuning-server >> ${LOG_PATH}/finetuning-server_upload_file.log + exit 1 + else + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + fi + # Check if the parsed values match the expected values + if [[ "$purpose" != "$expected_purpose" || "$filename" != "$expected_filename" ]]; then + echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY" + docker logs finetuning-server >> ${LOG_PATH}/finetuning-server_upload_file.log + exit 1 + else + echo "[ $SERVICE_NAME ] Content is as expected." + fi + + # test /v1/fine_tuning/jobs + URL="http://${ip_address}:$finetuning_service_port/v1/fine_tuning/jobs" + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -H 'Content-Type: application/json' -d '{"training_file": "test_data.json","model": "BAAI/bge-reranker-base","General":{"task":"rerank","lora_config":null}}' "$URL") + HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') + RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') + SERVICE_NAME="finetuning-server - create finetuning job" + + if [ "$HTTP_STATUS" -ne "200" ]; then + echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" + docker logs finetuning-server >> ${LOG_PATH}/finetuning-server_create.log + exit 1 + else + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + fi + if [[ "$RESPONSE_BODY" != *'{"id":"ft-job'* ]]; then + echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY" + docker logs finetuning-server >> ${LOG_PATH}/finetuning-server_create.log + exit 1 + else + echo "[ $SERVICE_NAME ] Content is as expected." + fi + + sleep 10m +} + +function stop_docker() { + cid=$(docker ps -aq --filter "name=finetuning-server*") + if [[ ! 
-z "$cid" ]]; then docker stop $cid && docker rm $cid && sleep 1s; fi +} + +function main() { + + stop_docker + + build_docker_images + start_service + + validate_microservice + + stop_docker + echo y | docker system prune + +} + +main diff --git a/tests/test_reranks_mosec-neuralspeed.sh b/tests/test_reranks_mosec-neuralspeed.sh new file mode 100644 index 0000000000..4512dc794d --- /dev/null +++ b/tests/test_reranks_mosec-neuralspeed.sh @@ -0,0 +1,84 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -x + +WORKPATH=$(dirname "$PWD") +ip_address=$(hostname -I | awk '{print $1}') + +function build_mosec_docker_images() { + cd $WORKPATH + echo $(pwd) + cp /data2/nswhl/* ./ + docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -t langchain-mosec:neuralspeed-reranks -f comps/reranks/neural-speed/neuralspeed-docker/Dockerfile . + if [ $? -ne 0 ]; then + echo "opea/reranking-langchain-mosec-endpoint built fail" + exit 1 + else + echo "opea/reranking-langchain-mosec-endpoint built successful" + fi +} + +function build_docker_images() { + cd $WORKPATH + echo $(pwd) + docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -t opea/reranking-langchain-mosec:neuralspeed -f comps/reranks/neural-speed/docker/Dockerfile . + if [ $? -ne 0 ]; then + echo "opea/reranking-langchain-mosec built fail" + exit 1 + else + echo "opea/reranking-langchain-mosec built successful" + fi +} + +function start_service() { + mosec_endpoint=5006 + model="BAAI/bge-reranker-large" + unset http_proxy + docker run -d --name="test-comps-reranking-langchain-mosec-endpoint" -p $mosec_endpoint:8000 langchain-mosec:neuralspeed-reranks + export MOSEC_RERANKING_ENDPOINT="http://${ip_address}:${mosec_endpoint}" + mosec_service_port=5007 + docker run -d --name="test-comps-reranking-langchain-mosec-server" -e http_proxy=$http_proxy -e https_proxy=$https_proxy -p ${mosec_service_port}:8000 --ipc=host -e MOSEC_RERANKING_ENDPOINT=$MOSEC_RERANKING_ENDPOINT opea/reranking-langchain-mosec:neuralspeed + sleep 3m +} + +function validate_microservice() { + mosec_service_port=5007 + result=$(http_proxy="" curl http://${ip_address}:${mosec_service_port}/v1/reranking\ + -X POST \ + -d '{"initial_query":"What is Deep Learning?", "retrieved_docs": [{"text":"Deep Learning is not..."}, {"text":"Deep learning is..."}]}' \ + -H 'Content-Type: application/json') + if [[ $result == *"Deep"* ]]; then + echo "Result correct." + else + echo "Result wrong. Received was $result" + docker logs test-comps-reranking-langchain-mosec-endpoint + docker logs test-comps-reranking-langchain-mosec-server + exit 1 + fi +} + +function stop_docker() { + cid=$(docker ps -aq --filter "name=test-comps-reranking-langchain-mosec-*") + if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid && sleep 1s; fi +} + +function main() { + + stop_docker + + build_mosec_docker_images + + build_docker_images + + start_service + + validate_microservice + + stop_docker + echo y | docker system prune + +} + +main