diff --git a/ChatQnA/kubernetes/manifests/README.md b/ChatQnA/kubernetes/README.md similarity index 100% rename from ChatQnA/kubernetes/manifests/README.md rename to ChatQnA/kubernetes/README.md diff --git a/ChatQnA/kubernetes/chatQnA_gaudi.yaml b/ChatQnA/kubernetes/chatQnA_gaudi.yaml index a8aabae95..d2b9905fe 100644 --- a/ChatQnA/kubernetes/chatQnA_gaudi.yaml +++ b/ChatQnA/kubernetes/chatQnA_gaudi.yaml @@ -23,6 +23,7 @@ spec: serviceName: embedding-svc config: endpoint: /v1/embeddings + TEI_EMBEDDING_ENDPOINT: tei-embedding-gaudi-svc - name: TeiEmbeddingGaudi internalService: serviceName: tei-embedding-gaudi-svc @@ -33,6 +34,8 @@ spec: serviceName: retriever-svc config: endpoint: /v1/retrieval + REDIS_URL: redis-vector-db + TEI_EMBEDDING_ENDPOINT: tei-embedding-gaudi-svc - name: VectorDB internalService: serviceName: redis-vector-db @@ -43,6 +46,7 @@ spec: serviceName: reranking-svc config: endpoint: /v1/reranking + TEI_RERANKING_ENDPOINT: tei-reranking-svc - name: TeiReranking internalService: serviceName: tei-reranking-svc @@ -55,6 +59,7 @@ spec: serviceName: llm-svc config: endpoint: /v1/chat/completions + TGI_LLM_ENDPOINT: tgi-gaudi-svc - name: TgiGaudi internalService: serviceName: tgi-gaudi-svc diff --git a/ChatQnA/kubernetes/chatQnA_switch_gaudi.yaml b/ChatQnA/kubernetes/chatQnA_switch_gaudi.yaml new file mode 100644 index 000000000..0af8cebda --- /dev/null +++ b/ChatQnA/kubernetes/chatQnA_switch_gaudi.yaml @@ -0,0 +1,124 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: gmc.opea.io/v1alpha3 +kind: GMConnector +metadata: + labels: + app.kubernetes.io/name: gmconnector + app.kubernetes.io/managed-by: kustomize + gmc/platform: gaudi + name: switch + namespace: switch +spec: + routerConfig: + name: router + serviceName: router-service + nodes: + root: + routerType: Sequence + steps: + - name: Embedding + nodeName: node1 + - name: Reranking + data: $response + internalService: + serviceName: reranking-svc + 
config: + endpoint: /v1/reranking + TEI_RERANKING_ENDPOINT: tei-reranking-svc + - name: TeiReranking + internalService: + serviceName: tei-reranking-svc + config: + endpoint: /rerank + isDownstreamService: true + - name: Llm + data: $response + nodeName: node2 + node1: + routerType: Switch + steps: + - name: Embedding + condition: embedding-model-id==large + internalService: + serviceName: embedding-svc-large + config: + endpoint: /v1/embeddings + TEI_EMBEDDING_ENDPOINT: tei-embedding-gaudi-svc-bge15 + - name: Embedding + condition: embedding-model-id==small + internalService: + serviceName: embedding-svc-small + config: + endpoint: /v1/embeddings + TEI_EMBEDDING_ENDPOINT: tei-embedding-gaudi-svc-bge-small + - name: TeiEmbeddingGaudi + internalService: + serviceName: tei-embedding-gaudi-svc-bge15 + config: + MODEL_ID: BAAI/bge-base-en-v1.5 + isDownstreamService: true + - name: TeiEmbeddingGaudi + internalService: + serviceName: tei-embedding-gaudi-svc-bge-small + config: + MODEL_ID: BAAI/bge-small-en-v1.5 + isDownstreamService: true + - name: Retriever + condition: embedding-model-id==large + data: $response + internalService: + serviceName: retriever-svc-large + config: + endpoint: /v1/retrieval + REDIS_URL: redis-vector-db-large + TEI_EMBEDDING_ENDPOINT: tei-embedding-gaudi-svc-bge15 + - name: Retriever + condition: embedding-model-id==small + data: $response + internalService: + serviceName: retriever-svc-small + config: + endpoint: /v1/retrieval + REDIS_URL: redis-vector-db-small + TEI_EMBEDDING_ENDPOINT: tei-embedding-gaudi-svc-bge-small + - name: VectorDB + internalService: + serviceName: redis-vector-db-large + isDownstreamService: true + - name: VectorDB + internalService: + serviceName: redis-vector-db-small + isDownstreamService: true + node2: + routerType: Switch + steps: + - name: Llm + condition: model-id==intel + internalService: + serviceName: llm-svc-intel + config: + endpoint: /v1/chat/completions + TGI_LLM_ENDPOINT: tgi-gaudi-service-intel + - 
name: Llm + condition: model-id==llama + internalService: + serviceName: llm-svc-llama + config: + endpoint: /v1/chat/completions + TGI_LLM_ENDPOINT: tgi-gaudi-service-llama + - name: TgiGaudi + internalService: + serviceName: tgi-gaudi-service-intel + config: + endpoint: /generate + MODEL_ID: Intel/neural-chat-7b-v3-3 + isDownstreamService: true + - name: TgiGaudi + internalService: + serviceName: tgi-gaudi-service-llama + config: + endpoint: /generate + MODEL_ID: openlm-research/open_llama_3b + isDownstreamService: true diff --git a/ChatQnA/kubernetes/chatQnA_switch_xeon.yaml b/ChatQnA/kubernetes/chatQnA_switch_xeon.yaml new file mode 100644 index 000000000..4f06a2106 --- /dev/null +++ b/ChatQnA/kubernetes/chatQnA_switch_xeon.yaml @@ -0,0 +1,124 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: gmc.opea.io/v1alpha3 +kind: GMConnector +metadata: + labels: + app.kubernetes.io/name: gmconnector + app.kubernetes.io/managed-by: kustomize + gmc/platform: xeon + name: switch + namespace: switch +spec: + routerConfig: + name: router + serviceName: router-service + nodes: + root: + routerType: Sequence + steps: + - name: Embedding + nodeName: node1 + - name: Reranking + data: $response + internalService: + serviceName: reranking-svc + config: + endpoint: /v1/reranking + TEI_RERANKING_ENDPOINT: tei-reranking-svc + - name: TeiReranking + internalService: + serviceName: tei-reranking-svc + config: + endpoint: /rerank + isDownstreamService: true + - name: Llm + data: $response + nodeName: node2 + node1: + routerType: Switch + steps: + - name: Embedding + condition: embedding-model-id==large + internalService: + serviceName: embedding-svc-large + config: + endpoint: /v1/embeddings + TEI_EMBEDDING_ENDPOINT: tei-embedding-svc-bge15 + - name: Embedding + condition: embedding-model-id==small + internalService: + serviceName: embedding-svc-small + config: + endpoint: /v1/embeddings + TEI_EMBEDDING_ENDPOINT: tei-embedding-svc-bge-small 
 + - name: TeiEmbedding + internalService: + serviceName: tei-embedding-svc-bge15 + config: + MODEL_ID: BAAI/bge-base-en-v1.5 + isDownstreamService: true + - name: TeiEmbedding + internalService: + serviceName: tei-embedding-svc-bge-small + config: + MODEL_ID: BAAI/bge-small-en-v1.5 + isDownstreamService: true + - name: Retriever + condition: embedding-model-id==large + data: $response + internalService: + serviceName: retriever-svc-large + config: + endpoint: /v1/retrieval + REDIS_URL: redis-vector-db-large + TEI_EMBEDDING_ENDPOINT: tei-embedding-svc-bge15 + - name: Retriever + condition: embedding-model-id==small + data: $response + internalService: + serviceName: retriever-svc-small + config: + endpoint: /v1/retrieval + REDIS_URL: redis-vector-db-small + TEI_EMBEDDING_ENDPOINT: tei-embedding-svc-bge-small + - name: VectorDB + internalService: + serviceName: redis-vector-db-large + isDownstreamService: true + - name: VectorDB + internalService: + serviceName: redis-vector-db-small + isDownstreamService: true + node2: + routerType: Switch + steps: + - name: Llm + condition: model-id==intel + internalService: + serviceName: llm-svc-intel + config: + endpoint: /v1/chat/completions + TGI_LLM_ENDPOINT: tgi-service-intel + - name: Llm + condition: model-id==llama + internalService: + serviceName: llm-svc-llama + config: + endpoint: /v1/chat/completions + TGI_LLM_ENDPOINT: tgi-service-llama + - name: Tgi + internalService: + serviceName: tgi-service-intel + config: + endpoint: /generate + MODEL_ID: Intel/neural-chat-7b-v3-3 + isDownstreamService: true + - name: Tgi + internalService: + serviceName: tgi-service-llama + config: + endpoint: /generate + MODEL_ID: bigscience/bloom-560m + isDownstreamService: true diff --git a/ChatQnA/kubernetes/chatQnA_xeon.yaml b/ChatQnA/kubernetes/chatQnA_xeon.yaml index 4d91ce59b..ff7cce725 100644 --- a/ChatQnA/kubernetes/chatQnA_xeon.yaml +++ b/ChatQnA/kubernetes/chatQnA_xeon.yaml @@ -23,6 +23,7 @@ spec: serviceName: embedding-svc config: 
endpoint: /v1/embeddings + TEI_EMBEDDING_ENDPOINT: tei-embedding-svc - name: TeiEmbedding internalService: serviceName: tei-embedding-svc @@ -33,6 +34,8 @@ spec: serviceName: retriever-svc config: endpoint: /v1/retrieval + REDIS_URL: redis-vector-db + TEI_EMBEDDING_ENDPOINT: tei-embedding-svc - name: VectorDB internalService: serviceName: redis-vector-db @@ -43,6 +46,7 @@ spec: serviceName: reranking-svc config: endpoint: /v1/reranking + TEI_RERANKING_ENDPOINT: tei-reranking-svc - name: TeiReranking internalService: serviceName: tei-reranking-svc @@ -55,6 +59,7 @@ spec: serviceName: llm-svc config: endpoint: /v1/chat/completions + TGI_LLM_ENDPOINT: tgi-service-m - name: Tgi internalService: serviceName: tgi-service-m diff --git a/ChatQnA/tests/test_gmc_on_gaudi.sh b/ChatQnA/tests/test_gmc_on_gaudi.sh index 6dbfd677f..7e09dcf1d 100755 --- a/ChatQnA/tests/test_gmc_on_gaudi.sh +++ b/ChatQnA/tests/test_gmc_on_gaudi.sh @@ -64,7 +64,7 @@ function validate_chatqna() { echo "Checking response results, make sure the output is reasonable. " local status=false if [[ -f $LOG_PATH/curl_chatqna.log ]] && \ - [[ $(grep -c "billion" $LOG_PATH/curl_chatqna.log) != 0 ]]; then + [[ $(grep -c '\[DONE\]' $LOG_PATH/curl_chatqna.log) != 0 ]]; then status=true fi if [ $status == false ]; then diff --git a/ChatQnA/tests/test_gmc_on_xeon.sh b/ChatQnA/tests/test_gmc_on_xeon.sh index 1fe53de88..132f62999 100755 --- a/ChatQnA/tests/test_gmc_on_xeon.sh +++ b/ChatQnA/tests/test_gmc_on_xeon.sh @@ -65,7 +65,7 @@ function validate_chatqna() { echo "Checking response results, make sure the output is reasonable. 
" local status=false if [[ -f $LOG_PATH/curl_chatqna.log ]] && \ - [[ $(grep -c "billion" $LOG_PATH/curl_chatqna.log) != 0 ]]; then + [[ $(grep -c '\[DONE\]' $LOG_PATH/curl_chatqna.log) != 0 ]]; then status=true fi if [ $status == false ]; then diff --git a/CodeGen/kubernetes/README.md b/CodeGen/kubernetes/README.md new file mode 100644 index 000000000..09e7d81a7 --- /dev/null +++ b/CodeGen/kubernetes/README.md @@ -0,0 +1,40 @@ +