# combine_simulators.py
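# Combines chit-chat persona dialogues with task-oriented simulators: for each
# dialogue it finds the turn where an intent appears, appends a transition
# question, and then lets a user simulator and a system simulator alternate
# turns until an end condition is met.
#
# Usage: python combine_simulators.py <path to persona dialogue JSON>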
import json
import re
import sys
from typing import Dict
import torch
from tqdm.auto import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
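# Jaccard similarity between two token lists; used below to detect near-duplicate
# system responses and rule-based dialogue endings.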
def jaccard_similarity(list1, list2):
    s1 = set(list1)
    s2 = set(list2)
    return float(len(s1.intersection(s2)) / len(s1.union(s2)))
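# The chit-chat dialogues to extend, given as a JSON file path on the command line.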
persona = json.load(open(sys.argv[1], "r"))
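# Natural-language descriptions of the supported task-oriented intents.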
intent_description: Dict[str, str] = {
    "LookupSong": "search for a song",
    "PlaySong": "play the selected song on the device",
    "LookupMusic": "search for a song based on the name and optionally other attributes",
    "FindMovies": "find movies by genre and optionally director",
    "GetTimesForMovie": "get show times for a movie at a location on a given date",
    "FindAttractions": "browse attractions in a given city",
}
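# All simulated dialogues are collected and written to this file at the end.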
output = open("combine_simulators.json", "w")
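# Transition questions that bridge from chit-chat into each task-oriented intent.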
transition_questions: Dict[str, str] = {
    k: f"Do you want to {v}?" for (k, v) in intent_description.items()
}
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load both simulators once, up front, rather than re-loading them for every
# generated turn.
user_checkpoint = "stanleychu2/user_400M"
user_tokenizer = AutoTokenizer.from_pretrained(user_checkpoint, use_fast=False)
user = AutoModelForSeq2SeqLM.from_pretrained(user_checkpoint).to(device)
user.eval()
system_checkpoint = "stanleychu2/system_400M"
sys_tokenizer = AutoTokenizer.from_pretrained(system_checkpoint, use_fast=False)
system = AutoModelForSeq2SeqLM.from_pretrained(system_checkpoint).to(device)
system.eval()
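# Keywords and stock closing sentences used to decide that a dialogue has ended.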
end_keywords = ["goodbye", "bye"]
end_sentences = [
    "have a great day",
    "have a nice day",
    "have a good day",
    "have a wonderful day",
    "enjoy your day",
    "have a good one",
    "have a good time",
    "enjoy the rest of your day",
    "have a fantastic day",
    "i am glad i could help have a nice day",
]
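# Accumulators for the current intent annotation and the combined dialogues.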
intent = {}
data = []
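# For each chit-chat dialogue: copy turns until an intent appears, append the
# transition question, then let the two simulators continue the conversation.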
for d in tqdm(persona):
    intent_appear = False
    intent = {}  # reset per dialogue so a previous dialogue's intent is not carried over
    history = []
    context = []
    for i, turn in enumerate(d):
        history.append(turn["text"])
        context.append(turn["text"])
        if len(turn["intent"]) != 0:
            last_chit_chat = d[i + 1]["text"] if (i + 1) < len(d) else ""
            intent_appear = True
            intent = {"type": turn["intent"], "position": i}
            whole_transition = (
                last_chit_chat + " " + transition_questions[turn["intent"][0]]
            )
            history.append(whole_transition)
            context.append(whole_transition)
            history = history[-3:]
            break
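    # If an intent appeared, simulate up to four additional user/system rounds.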
    if intent_appear:
        for _ in range(4):
            # User simulator turn.
            prefix = "user: "
            inputs = user_tokenizer(
                " ".join(history), max_length=128, truncation=True, return_tensors="pt"
            ).to(device)
            outputs = user.generate(
                **inputs,
                do_sample=True,
                top_k=120,
                no_repeat_ngram_size=2,
                min_length=1,
                max_length=64,
            ).squeeze(0)
            # 8010 = __END__
            if 8010 in outputs:
                print("__END__")
                break
            utterance = user_tokenizer.decode(
                outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True
            ).strip()
            history.append(utterance)
            context.append(utterance)
            history = history[-2:]
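            # System simulator turn: generate a response and stop if it repeats
            # an earlier turn or looks like a closing line.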
            prefix = "sys: "
            inputs = sys_tokenizer(
                " ".join(history), max_length=128, truncation=True, return_tensors="pt"
            ).to(device)
            outputs = system.generate(
                **inputs,
                do_sample=True,
                num_beams=5,
                no_repeat_ngram_size=3,
                num_return_sequences=5,
                early_stopping=True,
                max_length=128,
            ).squeeze(0)
            utterance = sys_tokenizer.decode(
                outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True
            ).strip()
            processed_utterance = re.sub(r"[^\w\s]", "", utterance.lower())
            processed_last_utterance = re.sub(r"[^\w\s]", "", history[-2].lower())
            # Stop if the new response largely repeats the utterance from two turns ago.
            if (
                jaccard_similarity(
                    sys_tokenizer.tokenize(processed_last_utterance),
                    sys_tokenizer.tokenize(processed_utterance),
                )
                > 0.4
            ):
                print("REPEAT:", utterance)
                print("REPEAT:", history[-2])
                break
            history.append(utterance)
            context.append(utterance)
            history = history[-2:]
            # Stop if the system says goodbye or produces a stock closing sentence.
            if any([(k in utterance) for k in end_keywords]) or any(
                [
                    jaccard_similarity(
                        sys_tokenizer.tokenize(processed_utterance),
                        sys_tokenizer.tokenize(s),
                    )
                    > 0.2
                    for s in end_sentences
                ]
            ):
                print("RULE:", utterance)
                break
    print(context)
    data.append(
        {"id": f"simulateTOD_{len(data):04d}", "dialog": context, "intent": intent}
    )
json.dump(data, output, indent=4)
output.close()