-
Notifications
You must be signed in to change notification settings - Fork 1
/
retrieval.py
79 lines (63 loc) · 3.06 KB
/
retrieval.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
# Retrieval-time query augmentation helpers (query expansion for RAG).
import os
from dotenv import load_dotenv
from openai import OpenAI
from config import config
# Load environment variables from a local .env file so the API key is available.
load_dotenv()
# NOTE(review): the env var is named "OPENAI_API", not the conventional
# "OPENAI_API_KEY" — confirm this matches the project's .env file; if the
# variable is missing, getenv returns None and API calls will fail later.
openai_client = OpenAI(api_key=os.getenv("OPENAI_API"))
# Default chat model for retrieval augmentation, taken from the project config.
MODEL = config["models"]["retrieval"]
def augment_query_generated(query, model=MODEL):
    """
    Generate a hypothetical example answer for a query (HyDE-style augmentation).

    Sends the input query to the chat model with a system prompt instructing it
    to produce an example answer like those found in scientific articles. The
    model's answer can then be used to enrich the retrieval query.

    Args:
        query (str): The original query to be augmented.
        model (str, optional): Chat model name. Defaults to the model
            configured under ``config["models"]["retrieval"]``.

    Returns:
        str: The example answer generated by the language model.
    """
    # Fix: the original prompt read "helpful expert research in artificial
    # intelligence" (missing "assistant"), inconsistent with the prompt used
    # by augment_multiple_query; also had a stray trailing space.
    messages = [
        {
            "role": "system",
            "content": (
                "You are a helpful expert research assistant in artificial "
                "intelligence. Provide an example answer to the given question, "
                "that might be found in a document like a scientific article."
            ),
        },
        {"role": "user", "content": query},
    ]
    response = openai_client.chat.completions.create(
        model=model,
        messages=messages,
    )
    return response.choices[0].message.content
def augment_multiple_query(query, model=MODEL):
    """
    Generate multiple related queries for the given input query.

    Sends the input query to the chat model with a system prompt asking for up
    to five short, related questions (one per line, unnumbered), then splits
    the response into individual questions.

    Args:
        query (str): The original query to generate related questions for.
        model (str, optional): Chat model name. Defaults to the model
            configured under ``config["models"]["retrieval"]``.

    Returns:
        list of str: Related questions, one per element, whitespace-stripped
        with blank lines removed.
    """
    messages = [
        {
            "role": "system",
            "content": "You are a helpful expert research assistant in artificial intelligence. Your users are asking questions about scientific article. "
            "Suggest up to five additional related questions to help them find the information they need, for the provided question. "
            "Suggest only short questions without compound sentences. Suggest a variety of questions that cover different aspects of the topic."
            "Make sure they are complete questions, and that they are related to the original question."
            "Output one question per line. Do not number the questions.",
        },
        {"role": "user", "content": query},
    ]
    response = openai_client.chat.completions.create(
        model=model,
        messages=messages,
    )
    content = response.choices[0].message.content
    # Fix: a bare split("\n") yields empty strings when the model separates
    # questions with blank lines, and leaves stray whitespace on each entry.
    # Strip each line and drop empties so callers get only real questions.
    return [line.strip() for line in content.split("\n") if line.strip()]