-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathprompt.py
84 lines (71 loc) · 4.66 KB
/
prompt.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
import random
def process_system_prompt(role_description, prompt, is_edit):
    """Return the persona system prompt for one chat-group agent.

    Args:
        role_description: dict with the persona fields "Name", "Gender",
            "Personality", "Style 1"/"Style 2", "Hobby", "Catchphrase",
            "Favorite Song", "Favorite Saying", "Color 1"/"Color 2".
        prompt: the discussion topic (counterfact: the question itself).
        is_edit: accepted for interface compatibility; not used here.

    Returns:
        A "Human: ..." header naming the topic, followed by one line per
        persona attribute. Style and Color are each picked at random from
        the two candidates in role_description.
    """
    single_hop_question = prompt  # counterfact: the prompt is the question
    chosen_style = random.choice(
        [role_description["Style 1"], role_description["Style 2"]]
    )
    chosen_color = random.choice(
        [role_description["Color 1"], role_description["Color 2"]]
    )
    header = (
        "Human: You are having a conversation with others in a chat group "
        f"on the topic `{single_hop_question}` and its related knowledge. "
        "Here is some basic description of you:\n"
    )
    profile_lines = [
        f" Name: {role_description['Name']}",
        f" Gender: {role_description['Gender']}",
        f" Personality: {role_description['Personality']}",
        f" Style: {chosen_style}",
        f" Hobby: {role_description['Hobby']}",
        f" Catchphrase: {role_description['Catchphrase']}",
        f" Favorite Song: {role_description['Favorite Song']}",
        f" Favorite Saying: {role_description['Favorite Saying']}",
        f" Color: {chosen_color}",
    ]
    return header + "\n".join(profile_lines) + "\n\n"
def prompt_template(prompt, full_history, knowledge_for_edit, role_description, is_edit, model_type):
    """Assemble the full per-turn prompt for one agent, in the target model's chat format.

    Args:
        prompt: instruction text prepended before the persona description.
        full_history: list of prior turns, each a dict with "name" and "text".
        knowledge_for_edit: dict; only its "prompt" field (the question) is used
            here (counterfact setting).
        role_description: persona dict; forwarded to process_system_prompt,
            and its "Name" identifies this agent's own turns in the history.
        is_edit: forwarded to process_system_prompt.
        model_type: one of "vicuna", "llama3", "gemma" — selects the
            chat-template control tokens wrapped around the prompt.

    Returns:
        The formatted prompt string, ending with the assistant-turn opener
        appropriate for model_type.

    Raises:
        ValueError: if model_type is not one of the supported values.
            (Previously an unknown model_type crashed later with an opaque
            NameError because DEFAULT_SYSTEM_PROMPT was never bound.)
    """
    if model_type not in ("vicuna", "llama3", "gemma"):
        raise ValueError(f"Unsupported model_type: {model_type!r}")

    self_name = role_description["Name"]
    # Persona block is identical across model types; build it once.
    persona = process_system_prompt(role_description, knowledge_for_edit["prompt"], is_edit)

    if model_type == "vicuna":
        DEFAULT_SYSTEM_PROMPT = prompt + persona
    elif model_type == "llama3":
        DEFAULT_SYSTEM_PROMPT = (
            "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
            + prompt
            + persona
            + "<|eot_id|>" + "\n"
            + "<|start_header_id|>user<|end_header_id|>\n\n"
        )
    else:  # gemma
        DEFAULT_SYSTEM_PROMPT = "<start_of_turn>user\n" + prompt + persona

    question = knowledge_for_edit["prompt"]  # counterfact: original question
    DEFAULT_SYSTEM_PROMPT += f"Here is the chat history of your conversations with others.\n"
    # Turn 0 is always the human's question; later turns are numbered from 1.
    DEFAULT_SYSTEM_PROMPT += f"\t0. Human: {question}\n"
    for i, h in enumerate(full_history):
        name = h["name"]
        text = h["text"]
        # The agent's own past turns are labeled "You" instead of its name.
        if name == self_name:
            DEFAULT_SYSTEM_PROMPT += f"\t{i+1}. You: {text}\n"
        else:
            DEFAULT_SYSTEM_PROMPT += f"\t{i+1}. {name}: {text}\n"
    DEFAULT_SYSTEM_PROMPT += f"Please continue chatting with others in a complete paragraph based on your role.\n\n"
    DEFAULT_SYSTEM_PROMPT += f"You:"

    # Close the user turn / open the assistant turn with model-specific tokens.
    if model_type == "llama3":
        DEFAULT_SYSTEM_PROMPT += "<|eot_id|><|start_header_id|>assistant<|end_header_id|>"
    elif model_type == "gemma":
        DEFAULT_SYSTEM_PROMPT += "<end_of_turn>\n<start_of_turn>model"
    return DEFAULT_SYSTEM_PROMPT