diff --git a/assistant_dists/dream_persona_openai_prompted/dev.yml b/assistant_dists/dream_persona_openai_prompted/dev.yml
index 1aeea31c95..04a344bbe0 100644
--- a/assistant_dists/dream_persona_openai_prompted/dev.yml
+++ b/assistant_dists/dream_persona_openai_prompted/dev.yml
@@ -10,12 +10,12 @@ services:
       - "./annotators/SentSeg:/src"
     ports:
       - 8011:8011
-  ranking-based-response-selector:
+  llm-based-response-selector:
     volumes:
-      - "./response_selectors/ranking_based_response_selector:/src"
+      - "./response_selectors/llm_based_response_selector:/src"
       - "./common:/src/common"
     ports:
-      - 8002:8002
+      - 8003:8003
   combined-classification:
     volumes:
       - "./common:/src/common"
diff --git a/assistant_dists/dream_persona_openai_prompted/docker-compose.override.yml b/assistant_dists/dream_persona_openai_prompted/docker-compose.override.yml
index 4bd621a727..af7698a54d 100644
--- a/assistant_dists/dream_persona_openai_prompted/docker-compose.override.yml
+++ b/assistant_dists/dream_persona_openai_prompted/docker-compose.override.yml
@@ -2,7 +2,7 @@ services:
   agent:
     command: sh -c 'bin/wait && python -m deeppavlov_agent.run agent.pipeline_config=assistant_dists/dream_persona_openai_prompted/pipeline_conf.json'
     environment:
-      WAIT_HOSTS: "sentseg:8011, ranking-based-response-selector:8002, combined-classification:8087,
+      WAIT_HOSTS: "sentseg:8011, llm-based-response-selector:8003, combined-classification:8087,
         sentence-ranker:8128, prompt-selector:8135, openai-api-chatgpt:8145,
         dff-dream-persona-chatgpt-prompted-skill:8137, dff-dream-faq-prompted-skill:8170,
         openai-api-chatgpt-16k:8167"
@@ -45,21 +45,22 @@ services:
         reservations:
           memory: 2G
-  ranking-based-response-selector:
-    env_file: [ .env ]
+  llm-based-response-selector:
+    env_file: [ .env,.env_secret ]
     build:
       args:
-        SERVICE_PORT: 8002
+        SERVICE_PORT: 8003
         SERVICE_NAME: response_selector
         LANGUAGE: EN
-        SENTENCE_RANKER_ANNOTATION_NAME: sentence_ranker
-        SENTENCE_RANKER_SERVICE_URL: http://sentence-ranker:8128/respond
-        SENTENCE_RANKER_TIMEOUT: 3
-        N_UTTERANCES_CONTEXT: 5
+        GENERATIVE_SERVICE_URL: http://openai-api-chatgpt:8145/respond
+        GENERATIVE_SERVICE_CONFIG: openai-chatgpt.json
+        GENERATIVE_TIMEOUT: 120
+        N_UTTERANCES_CONTEXT: 7
+        ENVVARS_TO_SEND: OPENAI_API_KEY,OPENAI_ORGANIZATION
         FILTER_TOXIC_OR_BADLISTED: 1
       context: .
-      dockerfile: ./response_selectors/ranking_based_response_selector/Dockerfile
-    command: flask run -h 0.0.0.0 -p 8002
+      dockerfile: ./response_selectors/llm_based_response_selector/Dockerfile
+    command: flask run -h 0.0.0.0 -p 8003
     environment:
       - FLASK_APP=server
     deploy:
diff --git a/assistant_dists/dream_persona_openai_prompted/pipeline_conf.json b/assistant_dists/dream_persona_openai_prompted/pipeline_conf.json
index e956c24af6..3be93b5dda 100644
--- a/assistant_dists/dream_persona_openai_prompted/pipeline_conf.json
+++ b/assistant_dists/dream_persona_openai_prompted/pipeline_conf.json
@@ -290,8 +290,8 @@
             "response_selector": {
                 "connector": {
                     "protocol": "http",
-                    "timeout": 1.0,
-                    "url": "http://ranking-based-response-selector:8002/respond"
+                    "timeout": 120.0,
+                    "url": "http://llm-based-response-selector:8003/respond"
                 },
                 "dialog_formatter": "state_formatters.dp_formatters:cropped_dialog",
                 "response_formatter": "state_formatters.dp_formatters:base_response_selector_formatter_service",
@@ -302,7 +302,7 @@
                 "is_enabled": true,
                 "source": {
                     "component": "components/YJzc7NwGrLmKp6gfZJh7X1.yml",
-                    "service": "response_selectors/ranking_based_response_selector/service_configs/ranking-based-response-selector"
+                    "service": "response_selectors/llm_based_response_selector/service_configs/llm-based-response-selector"
                 }
            }
        }
diff --git a/skill_selectors/description_based_skill_selector/connector.py b/skill_selectors/description_based_skill_selector/connector.py
index 217bd80251..2c7ad76c64 100644
--- a/skill_selectors/description_based_skill_selector/connector.py
+++ b/skill_selectors/description_based_skill_selector/connector.py
@@ -88,7 +88,7 @@ async def send(self, payload: Dict, callback: Callable):
                 skills_for_uttr.extend(prompted_skills)
                 logger.info("Adding all prompted skills as prompt selector did not select anything.")

-            if is_any_question_sentence_in_utterance(dialog["human_utterances"][-1]) and is_factoid:
+            if is_any_question_sentence_in_utterance(dialog["human_utterances"][-1]):
                 skills_for_uttr.append("dff_google_api_skill")

             if is_factoid:
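Reviewer note: after this change the agent still reaches the selector through the same HTTP `/respond` interface, just on port 8003 and with the connector timeout raised to 120 s to accommodate the LLM call. The snippet below is a minimal smoke-test sketch for the new endpoint, not part of the diff. It assumes the service port is published on the local host and that the payload is a `dialogs` list with candidate `hypotheses` attached to the last human utterance; the exact `cropped_dialog` formatter output may differ, so adjust the payload to the real contract before relying on it.

```python
# Sketch: smoke test for the llm-based-response-selector /respond endpoint.
# Assumptions (not verified against the repo): the port mapping 8003 is
# reachable from the host, and the service accepts a "dialogs" payload with
# "hypotheses" on the last human utterance.
import requests

SELECTOR_URL = "http://0.0.0.0:8003/respond"  # matches the new SERVICE_PORT and pipeline URL

payload = {
    "dialogs": [
        {
            "human_utterances": [
                {
                    "text": "What is your favorite book?",
                    # hypothetical candidate responses for the selector to rank
                    "hypotheses": [
                        {
                            "skill_name": "dff_dream_persona_chatgpt_prompted_skill",
                            "text": "I adore science fiction, especially classic space operas.",
                            "confidence": 0.9,
                        },
                        {
                            "skill_name": "dummy_skill",
                            "text": "Sorry, I didn't catch that.",
                            "confidence": 0.1,
                        },
                    ],
                }
            ],
            "bot_utterances": [],
        }
    ]
}

# Mirror the 120 s GENERATIVE_TIMEOUT / pipeline connector timeout introduced above.
response = requests.post(SELECTOR_URL, json=payload, timeout=120)
response.raise_for_status()
print(response.json())
```

If the port is only exposed inside the compose network, the same request can be issued from another container on that network instead of the host.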