
Commit 35afbe6

Merge branch 'reorg' of https://github.com/opea-project/GenAIComps into reorg
letonghan committed Sep 10, 2024
2 parents 0017025 + e65c644 commit 35afbe6
Showing 11 changed files with 51 additions and 38 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/pr-dockerfile-path-scan.yaml
@@ -71,7 +71,7 @@ jobs:
if [[ "$is_use" == "TRUE" ]]; then
echo "Warning: Changed Dockerfile paths:"
echo "$used_files"
echo "Please modify the corresponding README in GenAIComps and ask suyue.chen@intel.com for final confirmation."
echo "Please modify the corresponding README in GenAIComps."
exit 1
fi
@@ -90,6 +90,7 @@ jobs:
run: |
cd ..
git clone https://github.com/opea-project/GenAIExamples
cd GenAIExamples && git checkout refactor_folder && cd ../ # for test only
- name: Check for changed Dockerfile paths
run: |
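For context on the failing branch above: the job flags any changed Dockerfile whose path is still referenced in documentation. A minimal local sketch of that kind of scan, where the variable names and grep logic are illustrative rather than the workflow's actual implementation:

```bash
# Illustrative re-creation of the scan; $used_files mirrors the workflow's
# variable, but how the real job builds the list may differ.
changed=$(git diff --name-only HEAD~1 | grep Dockerfile || true)
for f in $changed; do
  used_files=$(grep -rl "$f" --include='*.md' . || true)
  if [[ -n "$used_files" ]]; then
    echo "Warning: Changed Dockerfile paths:"
    echo "$used_files"
    echo "Please modify the corresponding README in GenAIComps."
    exit 1
  fi
done
```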
13 changes: 7 additions & 6 deletions .github/workflows/pr-examples-test.yml
@@ -4,7 +4,7 @@
name: Example-test

on:
pull_request_target:
pull_request:
branches: [main]
types: [opened, reopened, ready_for_review, synchronize] # added `ready_for_review` since draft is skipped
paths:
@@ -44,22 +44,23 @@ jobs:
run: |
cd ../ && sudo rm -rf GenAIExamples
git clone https://github.com/opea-project/GenAIExamples.git
cd GenAIExamples/ChatQnA/docker
cd GenAIExamples && git checkout refactor_folder && cd ../ # for test only
cd GenAIExamples/ChatQnA/docker_image_build
cp -r ${{ github.workspace }}/../GenAIComps .
cd ../tests
sed -i '/GenAIComps.git/d' test_chatqna_on_gaudi.sh
cat test_chatqna_on_gaudi.sh
sed -i '/GenAIComps.git/d' test_compose_on_gaudi.sh
cat test_compose_on_gaudi.sh
echo "Run test..."
echo "LOG_DIR=$(pwd)" >> $GITHUB_ENV
export IMAGE_TAG="comps"
timeout 50m bash test_chatqna_on_gaudi.sh
timeout 50m bash test_compose_on_gaudi.sh
- name: Clean up container
if: cancelled() || failure()
run: |
cd ${{ github.workspace }}/../GenAIExamples/ChatQnA/docker/gaudi
cd ${{ github.workspace }}/../GenAIExamples/ChatQnA/docker_compose/intel/hpu/gaudi
docker compose stop && docker compose rm -f
docker system prune -f
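Pieced together, the steps this job now runs can be reproduced by hand on a Gaudi machine. A condensed sketch, assuming GenAIComps is checked out as a sibling of the directory you run this in:

```bash
# Condensed from the workflow steps above; run next to a GenAIComps checkout.
git clone https://github.com/opea-project/GenAIExamples.git
cd GenAIExamples && git checkout refactor_folder && cd ..   # for test only
cd GenAIExamples/ChatQnA/docker_image_build
cp -r ../../../GenAIComps .            # stands in for ${{ github.workspace }}/../GenAIComps
cd ../tests
sed -i '/GenAIComps.git/d' test_compose_on_gaudi.sh   # test the local copy, not a fresh clone
export IMAGE_TAG="comps"
timeout 50m bash test_compose_on_gaudi.sh
```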
2 changes: 1 addition & 1 deletion .github/workflows/pr-microservice-test.yml
@@ -5,7 +5,7 @@ name: MicroService-test

on:
pull_request:
branches: ["main", "*rc"]
branches: ["main", "*rc", "reorg"]
types: [opened, reopened, ready_for_review, synchronize] # added `ready_for_review` since draft is skipped
paths:
- comps/**
2 changes: 1 addition & 1 deletion comps/embeddings/neural-speed/README.md
@@ -9,7 +9,7 @@ docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_p
## build embedding microservice docker image

```
docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -t opea/embedding-langchain-mosec:neuralspeed -f comps/embeddings/neural-speed/docker/Dockerfile .
docker build --build-arg http_proxy=$http_proxy --build-arg https_proxy=$https_proxy -t opea/embedding-langchain-mosec:neuralspeed -f comps/embeddings/neural-speed/Dockerfile .
```

Note: Please contact us to request model files before building images.
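This is one of several spots in this commit (see also the reranking and retriever READMEs below) where a Dockerfile moved out of a docker/ subfolder. A hedged one-liner for catching stale references to the old layout after such a move:

```bash
# Illustrative only: list Markdown files that still point at the old
# comps/**/docker/Dockerfile locations.
grep -rn --include='*.md' -e 'comps/.*/docker/Dockerfile' .
```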
5 changes: 3 additions & 2 deletions comps/intent_detection/langchain/intent_detection.py
@@ -5,9 +5,9 @@

from langchain import LLMChain, PromptTemplate
from langchain_community.llms import HuggingFaceEndpoint
from template import IntentTemplate

from comps import GeneratedDoc, LLMParamsDoc, ServiceType, opea_microservices, register_microservice
from template import IntentTemplate


@register_microservice(
@@ -31,7 +31,8 @@ def llm_generate(input: LLMParamsDoc):
timeout=600,
)

prompt = PromptTemplate(template=IntentTemplate.generate_intent_template, input_variables=["query"])
prompt_template = 'Please identify the intent of the user query. You may only respond with "chitchat" or "QA" without explanations or engaging in conversation.### User Query: {query}, ### Response: '
prompt = PromptTemplate(template=prompt_template, input_variables=["query"])

llm_chain = LLMChain(prompt=prompt, llm=llm)

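With the template now inlined, the behavior is easiest to confirm over HTTP. A sketch using the 5043:9000 port mapping from the test script further down; the /v1/chat/intent route is an assumption about how the microservice registers itself:

```bash
# Hedged smoke test; route and host are assumptions, the payload is taken
# from tests/intent_detection/test_intent_detection_langchain.sh below.
curl http://localhost:5043/v1/chat/intent \
  -X POST \
  -H 'Content-Type: application/json' \
  -d '{"query":"What is Deep Learning?","max_new_tokens":10,"top_k":1,"temperature":0.001,"streaming":false}'
# A factual query like this should come back classified as "QA".
```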
1 change: 1 addition & 0 deletions comps/intent_detection/langchain/requirements.txt
@@ -7,3 +7,4 @@ opentelemetry-exporter-otlp
opentelemetry-sdk
prometheus-fastapi-instrumentator
shortuuid
uvicorn
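The new uvicorn entry is unpinned, so the resolved version will float. A quick, hedged way to see what a fresh environment actually picks up:

```bash
# Run in a clean virtualenv; the path matches this diff.
pip install -r comps/intent_detection/langchain/requirements.txt
pip show uvicorn   # prints the version that resolved
```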
8 changes: 0 additions & 8 deletions comps/intent_detection/langchain/template.py

This file was deleted.

2 changes: 1 addition & 1 deletion comps/reranks/video-rag-qna/README.md
@@ -10,7 +10,7 @@ For the `VideoRAGQnA` usecase, during the data preparation phase, frames are ext

```bash
cd GenAIComps
docker build --no-cache -t opea/reranking-videoragqna:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/video-rag-qna/docker/Dockerfile .
docker build --no-cache -t opea/reranking-videoragqna:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/video-rag-qna/Dockerfile .
```

### 1.2 Start Rerank Service
2 changes: 1 addition & 1 deletion comps/retrievers/multimodal/redis/langchain/README.md
@@ -51,7 +51,7 @@ export INDEX_NAME=${your_index_name}

```bash
cd ../../../../
docker build -t opea/multimodal-retriever-redis:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/retrievers/langchain/redis_multimodal/docker/Dockerfile .
docker build -t opea/multimodal-retriever-redis:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/retrievers/multimodal/redis/langchain/Dockerfile .
```

To start a docker container, you have two options:
20 changes: 10 additions & 10 deletions tests/agent/test_agent_langchain_on_intel_hpu.sh
@@ -50,36 +50,36 @@ function start_tgi_service() {

function start_react_langchain_agent_service() {
echo "Starting react_langchain agent microservice"
docker run -d --runtime=runc --name="comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langchain -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langchain -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
sleep 5s

docker logs comps-agent-endpoint
docker logs test-comps-agent-endpoint
echo "Service started successfully"
}


function start_react_langgraph_agent_service() {
echo "Starting react_langgraph agent microservice"
docker run -d --runtime=runc --name="comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langgraph -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=react_langgraph -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
sleep 5s
docker logs comps-agent-endpoint
docker logs test-comps-agent-endpoint
echo "Service started successfully"
}

function start_react_langgraph_agent_service_openai() {
echo "Starting react_langgraph agent microservice"
docker run -d --runtime=runc --name="comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e model=gpt-4o-mini-2024-07-18 -e strategy=react_langgraph -e llm_engine=openai -e OPENAI_API_KEY=${OPENAI_API_KEY} -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e model=gpt-4o-mini-2024-07-18 -e strategy=react_langgraph -e llm_engine=openai -e OPENAI_API_KEY=${OPENAI_API_KEY} -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
sleep 5s
docker logs comps-agent-endpoint
docker logs test-comps-agent-endpoint
echo "Service started successfully"
}


function start_ragagent_agent_service() {
echo "Starting rag agent microservice"
docker run -d --runtime=runc --name="comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=rag_agent -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
docker run -d --runtime=runc --name="test-comps-agent-endpoint" -v $WORKPATH/comps/agent/langchain/tools:/home/user/comps/agent/langchain/tools -p 5042:9090 --ipc=host -e HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -e model=${model} -e strategy=rag_agent -e llm_endpoint_url=http://${ip_address}:${tgi_port} -e llm_engine=tgi -e recursion_limit=10 -e require_human_feedback=false -e tools=/home/user/comps/agent/langchain/tools/custom_tools.yaml opea/comps-agent-langchain:comps
sleep 5s
docker logs comps-agent-endpoint
docker logs test-comps-agent-endpoint
echo "Service started successfully"
}

@@ -109,7 +109,7 @@ function validate_microservice() {
echo "return value is $EXIT_CODE"
if [ "$EXIT_CODE" == "1" ]; then
docker logs test-comps-tgi-gaudi-service &> ${LOG_PATH}/test-comps-tgi-gaudi-service.log
docker logs comps-agent-endpoint &> ${LOG_PATH}/test-comps-langchain-agent-endpoint.log
docker logs test-comps-agent-endpoint &> ${LOG_PATH}/test-comps-langchain-agent-endpoint.log
exit 1
fi
}
@@ -137,7 +137,7 @@ function stop_tgi_docker() {
}

function stop_agent_docker() {
cid=$(docker ps -aq --filter "name=comps-agent-endpoint")
cid=$(docker ps -aq --filter "name=test-comps-agent-endpoint")
echo "Stopping the docker containers "${cid}
if [[ ! -z "$cid" ]]; then docker rm $cid -f && sleep 1s; fi
echo "Docker containers stopped successfully"
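The test- prefix added throughout makes cleanup target only containers this test created. A minimal sketch of the same filter-then-remove pattern as a reusable helper (the function name is ours, not the script's):

```bash
# Hypothetical helper generalizing stop_agent_docker above.
stop_named() {
  local cid
  cid=$(docker ps -aq --filter "name=$1")
  if [[ -n "$cid" ]]; then docker rm -f $cid && sleep 1s; fi
}
stop_named "test-comps-agent-endpoint"
```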
31 changes: 24 additions & 7 deletions tests/intent_detection/test_intent_detection_langchain.sh
@@ -7,9 +7,10 @@ set -xe
WORKPATH=$(dirname "$PWD")
LOG_PATH="$WORKPATH/tests"
ip_address=$(hostname -I | awk '{print $1}')

function build_docker_images() {
cd $WORKPATH
docker build --no-cache -t opea/llm-tgi:latest -f comps/intent_detection/langchain/Dockerfile .
docker build --no-cache -t opea/intent-detection:comps -f comps/intent_detection/langchain/Dockerfile .
}

function start_service() {
@@ -22,8 +23,19 @@ function start_service() {
export TGI_LLM_ENDPOINT="http://${ip_address}:${tgi_endpoint}"
intent_port=5043
unset http_proxy
docker run -d --name="test-comps-intent-server" -p ${intent_port}:9000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TGI_LLM_ENDPOINT=$TGI_LLM_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN opea/llm-tgi:latest
sleep 5m
docker run -d --name="test-comps-intent-server" -p ${intent_port}:9000 --ipc=host -e http_proxy=$http_proxy -e https_proxy=$https_proxy -e TGI_LLM_ENDPOINT=$TGI_LLM_ENDPOINT -e HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN opea/intent-detection:comps

# check whether tgi is fully ready
n=0
until [[ "$n" -ge 100 ]] || [[ $ready == true ]]; do
docker logs test-comps-intent-tgi-endpoint > ${LOG_PATH}/tgi.log
n=$((n+1))
if grep -q Connected ${LOG_PATH}/tgi.log; then
break
fi
sleep 5s
done
sleep 5s
}

function validate_microservice() {
@@ -33,11 +45,16 @@ function validate_microservice() {
-d '{"query":"What is Deep Learning?","max_new_tokens":10,"top_k":1,"temperature":0.001,"streaming":false}' \
-H 'Content-Type: application/json')

echo "==============="
echo $result
if [[ $result == *"QA"* ]]; then
echo $result
echo "Result correct."
else
echo "Result wrong. Received was $result"
docker logs test-comps-intent-server > ${LOG_PATH}/intent_detection.log
docker logs test-comps-intent-tgi-endpoint > ${LOG_PATH}/tgi.log
exit 1
fi

docker logs test-comps-intent-server >> ${LOG_PATH}/intent_detection.log
docker logs test-comps-intent-tgi-endpoint >> ${LOG_PATH}/tgi-endpoint.log
}

function stop_docker() {
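The readiness loop added above polls the TGI logs for "Connected" but never sets the $ready flag it tests, so the grep/break is what actually ends the wait. A slightly tighter sketch of the same idea, assuming the same container name and log marker:

```bash
# Equivalent wait without the unused $ready flag; caps at ~100 polls.
n=0
until [[ "$n" -ge 100 ]]; do
  docker logs test-comps-intent-tgi-endpoint > ${LOG_PATH}/tgi.log 2>&1
  grep -q Connected ${LOG_PATH}/tgi.log && break
  n=$((n+1))
  sleep 5s
done
```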
