main.py
import os
import random

from pynput.keyboard import Key, Listener
from termcolor import colored

import src.audio as audio
import src.config as config
import src.utils as utils
from src.inference import infer


def user_input(config):
"""
Returns the user input either by using keyboard input
or by letting the user speak and using speech recognition
"""
# If the program is configured to work with the microphone
if config.AUDIO_INPUT:
print(colored(f'{config.USER_NAME}: ', "yellow") +
colored("### 🤫 PRESS SPACE TO RECORD ####", "white"))
with Listener(
on_press=audio.recorder_handler
) as listener:
listener.join()
message = audio.voice_input
return message
# Else use a simple keyboard input
else:
message = input(colored(f"{config.USER_NAME}: ", "yellow"))
return message
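
# NOTE (assumption): audio.recorder_handler and audio.voice_input live in
# src/audio.py, which is not shown here. Based on their use above,
# recorder_handler is expected to stop the pynput Listener once the user
# validates the message, after which voice_input holds the transcribed text.
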
def generate_response(config, full_prompt, seed):
    """
    Generate a response by letting Bloom autocomplete the conversation
    contained in full_prompt.
    """
    # Run inference on the prompt followed by a new line starting with BOT_NAME:
    resp = infer(f"{full_prompt}\n{config.BOT_NAME}: ", seed)
    # The generated text contains the whole prompt so far; keep only the text
    # after the last "BOT_NAME:" marker, i.e. the newly generated response
    response = resp[0]["generated_text"].split(
        f'{config.BOT_NAME}:')[-1].strip()
    # If Bloom also generated a line for the user, cut it off
    response = response.split(config.USER_NAME)[0]
    response = f'{config.BOT_NAME}: {response}'
    response = utils.punctuation_cut(response)
    # If punctuation_cut could not produce a usable response, retry with a new seed
    if response is None:
        return generate_response(config, full_prompt, seed + random.randint(0, 9999))
    return response
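
# Illustrative sketch of the parsing above (hypothetical names and text,
# assuming infer() returns a Hugging Face-style completion list):
#   resp = [{"generated_text": "<scenario>\nUser: Hi\nBot: Hello there! User: ..."}]
#   split on "Bot:" and keep the last part -> " Hello there! User: ..."
#   cut at the user name                   -> "Hello there! "
#   final response                         -> "Bot: Hello there!"
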
def discussion(config, prompts, seed):
    """
    Main loop of the program: handles the exchange between the user and the
    chatbot. The function is recursive; each call first generates a bot
    message and then asks the user for a response.
    """
    # The full conversation so far
    full_prompt = "\n".join(prompts)
    # Generate the chatbot's response using Bloom inference
    bot_response = generate_response(config, full_prompt, seed)
    if config.AUDIO_OUTPUT:
        utils.clear_previous_console_line()
    print(colored(bot_response.replace('\n', ''), "green"))
    prompts.append(bot_response)
    # If AUDIO_OUTPUT is enabled, play the response using text-to-speech
    if config.AUDIO_OUTPUT:
        audio.text_to_speech(bot_response.split(config.BOT_NAME)[1].strip())
    # Ask the user for a response
    message = user_input(config)
    # Append the user's message to the conversation history
    new_prompt = f"{config.USER_NAME}: {message.strip()}"
    prompts.append(new_prompt)
    # Start a new round of discussion
    discussion(config, prompts, seed)
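
# NOTE: discussion() calls itself once per exchange, so a very long session
# will eventually hit Python's recursion limit (sys.getrecursionlimit(),
# 1000 by default). An equivalent "while True:" loop over the same body
# would avoid this.
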
def initialize(config):
    """
    Initialize the conversation and start the discussion loop.
    """
    prompts = [config.SCENARIO]
    seed = random.randint(0, 99999)
    # Clear the console
    os.system('clear')
    # Audio input instructions
    if config.AUDIO_INPUT:
        print(colored('-' * 45 + "\nAudio controls:\n"
                      "- Press SPACEBAR to record or re-record (while the message is in cyan)\n"
                      "- Press ENTER to validate your message (when the message is in cyan)\n"
                      + '-' * 45 + '\n', "red"))
    # Print the context prompt (the scenario) in italics
    print(colored(f"\x1B[3m {prompts[0]} \x1B[0m", "blue"))
    # If the user is configured to send the first message, ask for it
    if config.FIRST_MESSAGE_USER:
        message = user_input(config)
        # Append the user's message to the conversation history
        new_prompt = f"{config.USER_NAME}: {message.strip()}"
        prompts.append(new_prompt)
    discussion(config, prompts, seed)
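
# Entry point. The script assumes src/config.py defines at least USER_NAME,
# BOT_NAME, SCENARIO, AUDIO_INPUT, AUDIO_OUTPUT and FIRST_MESSAGE_USER,
# as used above.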
if __name__ == "__main__":
    initialize(config)