Skip to content

Commit

Permalink
Fix/llm response selector prompt (#524)
Browse files Browse the repository at this point in the history
* fix: add internal external info to resp selector

* fix: check

* fix: spellcheck

* fix: short context for resp selector

* fix: use service

* fix: do not use service

* changed prompt

* added low priority for 'as an AI language model'/chatbot responses

* codestyle

---------

Co-authored-by: dilyararimovna <dilyara.rimovna@gmail.com>
  • Loading branch information
Kpetyxova and dilyararimovna authored Jul 17, 2023
1 parent 668f410 commit 1bb5abe
Show file tree
Hide file tree
Showing 2 changed files with 20 additions and 4 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ services:
GENERATIVE_SERVICE_URL: http://openai-api-chatgpt:8145/respond
GENERATIVE_SERVICE_CONFIG: openai-chatgpt.json
GENERATIVE_TIMEOUT: 120
N_UTTERANCES_CONTEXT: 7
N_UTTERANCES_CONTEXT: 1
ENVVARS_TO_SEND: OPENAI_API_KEY,OPENAI_ORGANIZATION
FILTER_TOXIC_OR_BADLISTED: 1
context: .
Expand Down
22 changes: 19 additions & 3 deletions response_selectors/llm_based_response_selector/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,10 +31,14 @@
CRITERION = getenv("CRITERION", "the most appropriate, relevant and non-toxic")
PROMPT = (
f"""Select {CRITERION} response among the hypotheses to the given dialog context. """
"""Return only the selected response without extra explanations."""
"""Return only the selected response without extra explanations. """
"""Always give the lowest priority to responses that contain 'As an AI language model'/'As a chatbot' """
"""and give the highest priority to responses coming from the external services:
"""
)
ENVVARS_TO_SEND = getenv("ENVVARS_TO_SEND", None)
ENVVARS_TO_SEND = [] if ENVVARS_TO_SEND is None else ENVVARS_TO_SEND.split(",")
EXTERNAL_SKILLS = ["factoid_qa", "dff_google_api_skill"]

assert GENERATIVE_SERVICE_URL

Expand All @@ -58,10 +62,22 @@ def select_response_by_scores(hypotheses, scores):

def select_response(dialog_context, hypotheses, human_uttr_attributes):
try:
ie_types = [
"external service" if hyp["skill_name"] in EXTERNAL_SKILLS else "internal service" for hyp in hypotheses
]
if "transformers" in GENERATIVE_SERVICE_URL:
curr_prompt = "Hypotheses:\n" + "\n".join([f'"{hyp["text"]}"' for hyp in hypotheses]) + "\n" + PROMPT
curr_prompt = (
"Hypotheses:\n"
+ "\n".join([f'"{hyp["text"]}" [{ie}]' for hyp, ie in zip(hypotheses, ie_types)])
+ "\n"
+ PROMPT
)
else:
curr_prompt = PROMPT + "\nHypotheses:\n" + "\n".join([f'"{hyp["text"]}"' for hyp in hypotheses])
curr_prompt = (
PROMPT
+ "\nHypotheses:\n"
+ "\n".join([f'"{hyp["text"]}" [{ie}]' for hyp, ie in zip(hypotheses, ie_types)])
)
logger.info(f"llm_based_response_selector sends dialog context to llm:\n`{dialog_context}`")
logger.info(f"llm_based_response_selector sends prompt to llm:\n`{curr_prompt}`")

Expand Down

0 comments on commit 1bb5abe

Please sign in to comment.