From bb5b65f4b9b562ac797bbb8e70f819b2d6c50341 Mon Sep 17 00:00:00 2001
From: dilyararimovna
Date: Mon, 10 Jul 2023 21:23:57 +0300
Subject: [PATCH 1/9] fix: add internal/external info to resp selector

---
 .../llm_based_response_selector/server.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/response_selectors/llm_based_response_selector/server.py b/response_selectors/llm_based_response_selector/server.py
index fb24994ae2..f58a532561 100644
--- a/response_selectors/llm_based_response_selector/server.py
+++ b/response_selectors/llm_based_response_selector/server.py
@@ -31,10 +31,15 @@
 CRITERION = getenv("CRITERION", "the most appropriate, relevant and non-toxic")
 PROMPT = (
     f"""Select {CRITERION} response among the hypotheses to the given dialog context. """
-    """Return only the selected response without extra explanations."""
+    """Return only the selected response without extra explanations. """
+    """Take into account that some of the questions may require going to the outside services """
+    """so if you think that you as an AI language model cannot adequately answer user's question, """
+    """prioritize responses coming from the external services:
+"""
 )
 ENVVARS_TO_SEND = getenv("ENVVARS_TO_SEND", None)
 ENVVARS_TO_SEND = [] if ENVVARS_TO_SEND is None else ENVVARS_TO_SEND.split(",")
+EXTERNAL_SKILLS = ["factoid_qa", "dff_google_api_skill"]
 
 assert GENERATIVE_SERVICE_URL
 
@@ -58,10 +63,13 @@ def select_response_by_scores(hypotheses, scores):
 
 def select_response(dialog_context, hypotheses, human_uttr_attributes):
     try:
+        ie_types = ["external" if hyp["skilL_name"] else "internal" in EXTERNAL_SKILLS for hyp in hypotheses]
         if "transformers" in GENERATIVE_SERVICE_URL:
-            curr_prompt = "Hypotheses:\n" + "\n".join([f'"{hyp["text"]}"' for hyp in hypotheses]) + "\n" + PROMPT
+            curr_prompt = "Hypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie}]'
+                                                       for hyp, ie in zip(hypotheses, ie_types)]) + "\n" + PROMPT
         else:
-            curr_prompt = PROMPT + "\nHypotheses:\n" + "\n".join([f'"{hyp["text"]}"' for hyp in hypotheses])
+            curr_prompt = PROMPT + "\nHypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie}]'
+                                                                  for hyp, ie in zip(hypotheses, ie_types)])
         logger.info(f"llm_based_response_selector sends dialog context to llm:\n`{dialog_context}`")
         logger.info(f"llm_based_response_selector sends prompt to llm:\n`{curr_prompt}`")

From 7f17718645f88d82fc79f66ac9d7bc47896b843c Mon Sep 17 00:00:00 2001
From: dilyararimovna
Date: Mon, 10 Jul 2023 21:25:39 +0300
Subject: [PATCH 2/9] fix: check

---
 response_selectors/llm_based_response_selector/server.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/response_selectors/llm_based_response_selector/server.py b/response_selectors/llm_based_response_selector/server.py
index f58a532561..5111465c11 100644
--- a/response_selectors/llm_based_response_selector/server.py
+++ b/response_selectors/llm_based_response_selector/server.py
@@ -63,7 +63,7 @@ def select_response_by_scores(hypotheses, scores):
 
 def select_response(dialog_context, hypotheses, human_uttr_attributes):
     try:
-        ie_types = ["external" if hyp["skilL_name"] else "internal" in EXTERNAL_SKILLS for hyp in hypotheses]
+        ie_types = ["external" if hyp["skilL_name"] in EXTERNAL_SKILLS else "internal" for hyp in hypotheses]
         if "transformers" in GENERATIVE_SERVICE_URL:
             curr_prompt = "Hypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie}]'
                                                        for hyp, ie in zip(hypotheses, ie_types)]) + "\n" + PROMPT

From 405266293144a7b678a88b01891b98226a6568b2 Mon Sep 17 00:00:00 2001
From: dilyararimovna
Date: Mon, 10 Jul 2023 21:27:31 +0300
Subject: [PATCH 3/9] fix: spellcheck

---
 response_selectors/llm_based_response_selector/server.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/response_selectors/llm_based_response_selector/server.py b/response_selectors/llm_based_response_selector/server.py
index 5111465c11..b0339ed773 100644
--- a/response_selectors/llm_based_response_selector/server.py
+++ b/response_selectors/llm_based_response_selector/server.py
@@ -63,7 +63,7 @@ def select_response_by_scores(hypotheses, scores):
 
 def select_response(dialog_context, hypotheses, human_uttr_attributes):
     try:
-        ie_types = ["external" if hyp["skilL_name"] in EXTERNAL_SKILLS else "internal" for hyp in hypotheses]
+        ie_types = ["external" if hyp["skill_name"] in EXTERNAL_SKILLS else "internal" for hyp in hypotheses]
         if "transformers" in GENERATIVE_SERVICE_URL:
             curr_prompt = "Hypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie}]'
                                                        for hyp, ie in zip(hypotheses, ie_types)]) + "\n" + PROMPT

From 0d79fb545071ad6b87873add4896aeb6b3103336 Mon Sep 17 00:00:00 2001
From: dilyararimovna
Date: Mon, 10 Jul 2023 21:39:48 +0300
Subject: [PATCH 4/9] fix: short context for resp selector

---
 .../dream_persona_openai_prompted/docker-compose.override.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/assistant_dists/dream_persona_openai_prompted/docker-compose.override.yml b/assistant_dists/dream_persona_openai_prompted/docker-compose.override.yml
index af7698a54d..55602b3f9f 100644
--- a/assistant_dists/dream_persona_openai_prompted/docker-compose.override.yml
+++ b/assistant_dists/dream_persona_openai_prompted/docker-compose.override.yml
@@ -55,7 +55,7 @@ services:
         GENERATIVE_SERVICE_URL: http://openai-api-chatgpt:8145/respond
         GENERATIVE_SERVICE_CONFIG: openai-chatgpt.json
         GENERATIVE_TIMEOUT: 120
-        N_UTTERANCES_CONTEXT: 7
+        N_UTTERANCES_CONTEXT: 1
         ENVVARS_TO_SEND: OPENAI_API_KEY,OPENAI_ORGANIZATION
         FILTER_TOXIC_OR_BADLISTED: 1
       context: .
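Note on PATCH 1 through PATCH 3: the original comprehension contains two separate defects, the misspelled `skilL_name` key (corrected in PATCH 3) and an operator-precedence error (corrected in PATCH 2). Python parses `"external" if cond else "internal" in EXTERNAL_SKILLS` as `"external" if cond else ("internal" in EXTERNAL_SKILLS)`, so the membership test never touches the skill name. A minimal sketch of both forms, using the corrected key and hypothetical sample data:

    # Precedence bug fixed in PATCH 2; the hypothesis dict here is hypothetical sample data.
    EXTERNAL_SKILLS = ["factoid_qa", "dff_google_api_skill"]
    hyp = {"skill_name": "dummy_skill", "text": "Hello!"}

    # Buggy form from PATCH 1: parsed as
    #   "external" if hyp["skill_name"] else ("internal" in EXTERNAL_SKILLS)
    # so every hypothesis with a non-empty skill name is labeled "external".
    buggy = "external" if hyp["skill_name"] else "internal" in EXTERNAL_SKILLS
    print(buggy)  # external -- wrong, dummy_skill is not in EXTERNAL_SKILLS

    # Fixed form from PATCH 2: the membership test is the condition itself.
    fixed = "external" if hyp["skill_name"] in EXTERNAL_SKILLS else "internal"
    print(fixed)  # internal

PATCH 4 then shortens the dialog context handed to the selector (N_UTTERANCES_CONTEXT: 7 -> 1), so the LLM judges the hypotheses against only the latest utterance.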
From f92482b146ad9f947a958c15f33b78996070f072 Mon Sep 17 00:00:00 2001
From: dilyararimovna
Date: Mon, 10 Jul 2023 21:44:50 +0300
Subject: [PATCH 5/9] fix: use service

---
 response_selectors/llm_based_response_selector/server.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/response_selectors/llm_based_response_selector/server.py b/response_selectors/llm_based_response_selector/server.py
index b0339ed773..b3b31bf92f 100644
--- a/response_selectors/llm_based_response_selector/server.py
+++ b/response_selectors/llm_based_response_selector/server.py
@@ -65,10 +65,10 @@ def select_response(dialog_context, hypotheses, human_uttr_attributes):
     try:
         ie_types = ["external" if hyp["skill_name"] in EXTERNAL_SKILLS else "internal" for hyp in hypotheses]
         if "transformers" in GENERATIVE_SERVICE_URL:
-            curr_prompt = "Hypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie}]'
+            curr_prompt = "Hypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie} service]'
                                                        for hyp, ie in zip(hypotheses, ie_types)]) + "\n" + PROMPT
         else:
-            curr_prompt = PROMPT + "\nHypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie}]'
+            curr_prompt = PROMPT + "\nHypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie} service]'
                                                                   for hyp, ie in zip(hypotheses, ie_types)])
         logger.info(f"llm_based_response_selector sends dialog context to llm:\n`{dialog_context}`")
         logger.info(f"llm_based_response_selector sends prompt to llm:\n`{curr_prompt}`")

From 32207ebaaadc2bd3201d2ab806eb8343919f8457 Mon Sep 17 00:00:00 2001
From: dilyararimovna
Date: Mon, 10 Jul 2023 21:47:08 +0300
Subject: [PATCH 6/9] fix: do not use service

---
 response_selectors/llm_based_response_selector/server.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/response_selectors/llm_based_response_selector/server.py b/response_selectors/llm_based_response_selector/server.py
index b3b31bf92f..b0339ed773 100644
--- a/response_selectors/llm_based_response_selector/server.py
+++ b/response_selectors/llm_based_response_selector/server.py
@@ -65,10 +65,10 @@ def select_response(dialog_context, hypotheses, human_uttr_attributes):
     try:
         ie_types = ["external" if hyp["skill_name"] in EXTERNAL_SKILLS else "internal" for hyp in hypotheses]
         if "transformers" in GENERATIVE_SERVICE_URL:
-            curr_prompt = "Hypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie} service]'
+            curr_prompt = "Hypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie}]'
                                                        for hyp, ie in zip(hypotheses, ie_types)]) + "\n" + PROMPT
         else:
-            curr_prompt = PROMPT + "\nHypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie} service]'
+            curr_prompt = PROMPT + "\nHypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie}]'
                                                                   for hyp, ie in zip(hypotheses, ie_types)])
         logger.info(f"llm_based_response_selector sends dialog context to llm:\n`{dialog_context}`")
         logger.info(f"llm_based_response_selector sends prompt to llm:\n`{curr_prompt}`")

From dbbf03b3c206f17f691c333188a2f3b0d93ef1ab Mon Sep 17 00:00:00 2001
From: kpetyxova
Date: Wed, 12 Jul 2023 10:56:39 +0300
Subject: [PATCH 7/9] changed prompt

---
 response_selectors/llm_based_response_selector/server.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/response_selectors/llm_based_response_selector/server.py b/response_selectors/llm_based_response_selector/server.py
index b0339ed773..2117365cd5 100644
--- a/response_selectors/llm_based_response_selector/server.py
+++ b/response_selectors/llm_based_response_selector/server.py
@@ -32,9 +32,7 @@
 PROMPT = (
     f"""Select {CRITERION} response among the hypotheses to the given dialog context. """
     """Return only the selected response without extra explanations. """
-    """Take into account that some of the questions may require going to the outside services """
-    """so if you think that you as an AI language model cannot adequately answer user's question, """
-    """prioritize responses coming from the external services:
+    """Always prioritize responses coming from the external services:
 """
 )
 ENVVARS_TO_SEND = getenv("ENVVARS_TO_SEND", None)
@@ -63,7 +61,7 @@ def select_response_by_scores(hypotheses, scores):
 
 def select_response(dialog_context, hypotheses, human_uttr_attributes):
     try:
-        ie_types = ["external" if hyp["skill_name"] in EXTERNAL_SKILLS else "internal" for hyp in hypotheses]
+        ie_types = ["external service" if hyp["skill_name"] in EXTERNAL_SKILLS else "internal service" for hyp in hypotheses]
         if "transformers" in GENERATIVE_SERVICE_URL:
             curr_prompt = "Hypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie}]'
                                                        for hyp, ie in zip(hypotheses, ie_types)]) + "\n" + PROMPT

From 005a82a2d758a379ee8bf58a3861849e6c1677c5 Mon Sep 17 00:00:00 2001
From: kpetyxova
Date: Wed, 12 Jul 2023 15:24:38 +0300
Subject: [PATCH 8/9] added low priority for 'as an AI language model'/'as a chatbot' responses

---
 response_selectors/llm_based_response_selector/server.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/response_selectors/llm_based_response_selector/server.py b/response_selectors/llm_based_response_selector/server.py
index 2117365cd5..9280562660 100644
--- a/response_selectors/llm_based_response_selector/server.py
+++ b/response_selectors/llm_based_response_selector/server.py
@@ -32,7 +32,8 @@
 PROMPT = (
     f"""Select {CRITERION} response among the hypotheses to the given dialog context. """
     """Return only the selected response without extra explanations. """
-    """Always prioritize responses coming from the external services:
+    """Always give the lowest priority to responses that contain 'As an AI language model'/'As a chatbot' """
+    """and give the highest priority to responses coming from the external services:
 """
 )
 ENVVARS_TO_SEND = getenv("ENVVARS_TO_SEND", None)
@@ -43,7 +44,7 @@
 
 
 def filter_out_badlisted_or_toxic(hypotheses):
-    clean_hypotheses = []
+    clean_hypotheses = []
     for hyp in hypotheses:
         is_toxic = is_toxic_or_badlisted_utterance(hyp)
         if not is_toxic:

From b297c67a641ed7fa3742045ba984c26cc054d478 Mon Sep 17 00:00:00 2001
From: kpetyxova
Date: Thu, 13 Jul 2023 11:26:41 +0300
Subject: [PATCH 9/9] codestyle

---
 .../llm_based_response_selector/server.py | 21 +++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/response_selectors/llm_based_response_selector/server.py b/response_selectors/llm_based_response_selector/server.py
index 9280562660..0c88502972 100644
--- a/response_selectors/llm_based_response_selector/server.py
+++ b/response_selectors/llm_based_response_selector/server.py
@@ -44,7 +44,7 @@
 
 
 def filter_out_badlisted_or_toxic(hypotheses):
-    clean_hypotheses = []
+    clean_hypotheses = []
     for hyp in hypotheses:
         is_toxic = is_toxic_or_badlisted_utterance(hyp)
         if not is_toxic:
@@ -62,13 +62,22 @@ def select_response_by_scores(hypotheses, scores):
 
 def select_response(dialog_context, hypotheses, human_uttr_attributes):
     try:
-        ie_types = ["external service" if hyp["skill_name"] in EXTERNAL_SKILLS else "internal service" for hyp in hypotheses]
+        ie_types = [
+            "external service" if hyp["skill_name"] in EXTERNAL_SKILLS else "internal service" for hyp in hypotheses
+        ]
         if "transformers" in GENERATIVE_SERVICE_URL:
-            curr_prompt = "Hypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie}]'
-                                                       for hyp, ie in zip(hypotheses, ie_types)]) + "\n" + PROMPT
+            curr_prompt = (
+                "Hypotheses:\n"
+                + "\n".join([f'"{hyp["text"]}" [{ie}]' for hyp, ie in zip(hypotheses, ie_types)])
+                + "\n"
+                + PROMPT
+            )
         else:
-            curr_prompt = PROMPT + "\nHypotheses:\n" + "\n".join([f'"{hyp["text"]}" [{ie}]'
-                                                                  for hyp, ie in zip(hypotheses, ie_types)])
+            curr_prompt = (
+                PROMPT
+                + "\nHypotheses:\n"
+                + "\n".join([f'"{hyp["text"]}" [{ie}]' for hyp, ie in zip(hypotheses, ie_types)])
+            )
         logger.info(f"llm_based_response_selector sends dialog context to llm:\n`{dialog_context}`")
         logger.info(f"llm_based_response_selector sends prompt to llm:\n`{curr_prompt}`")