This repository has been archived by the owner on Oct 2, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 5
/
script.py
383 lines (323 loc) · 15.1 KB
/
script.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
import re
import asyncio
import subprocess
import json
import modules.shared as shared
import gradio as gr
from EdgeGPT.EdgeGPT import Chatbot, ConversationStyle
from modules.chat import replace_all, get_turn_substrings, replace_character_names
from modules.text_generation import (get_max_prompt_length, get_encoded_length)
from modules.extensions import apply_extensions
#------------------------------------------------------------
# Get current acheong08 EdgeGPT version installed
#------------------------------------------------------------
# Run "conda list EdgeGPT" and get output
command = "conda list EdgeGPT"
conda_version = subprocess.check_output(command, shell=True)
# Decode output into string
conda_version_output = conda_version.decode("utf-8")
# Find version
pattern = r"edgegpt\s+(\S+)"
match = re.search(pattern, conda_version_output)
if match:
version = match.group(1)
print("acheong08 EdgeGPT core script: ", version)
else:
print("Version not found.")
#------------------------------------------------------------
# Normal oobaboga webui
#------------------------------------------------------------
# Module-level state shared between the webui hooks below.
BingOutput=None            # re.Match when the activation word was found in the user input, else None
RawBingString=None         # last Bing reply with citations stripped; None until Bing answers
BingString=None            # RawBingString wrapped between BingContext1 and BingContext2
ShowBingString=False       # mirror of params['ShowBingString'], synced in input_modifier
OverwriteWord=False        # mirror of params['OverwriteWord']: always query Bing
PrintUserInput=False       # mirror of params['PrintUserInput']: echo user input to console
PrintWholePrompt=False     # mirror of params['PrintWholePrompt']: echo final prompt to console
PrintRawBingString=False   # mirror of params['PrintRawBingString']: echo Bing reply to console
PrintBingString=False      # mirror of params['PrintBingString']: echo context + reply to console
UseCookies=False           # mirror of params['UseCookies']: authenticate with cookies.json
BingConversationStyle="creative"   # one of "creative" / "balanced" / "precise"
ChosenWord="Hey Bing"      # activation word searched for in the user input
# Text injected before and after the Bing reply when it is added to the prompt.
BingContext1="Important informations: "
BingContext2="Now answer the following question based on the given informations. If I say \"Hey Bing\" I am referring to you anyway. Do not say you are Bing.\n"
print("\nThanks for using the EdgeGPT extension! If you encounter any bug or you have some nice idea to add, write it on the issue page here: https://github.com/GiusTex/EdgeGPT/issues")
# Checkbox state driven by the Gradio UI (see ui() below); input_modifier
# copies these values into the module-level flags on every user message.
params = {
    'ShowBingString': False,
    'OverwriteWord': False,
    'PrintUserInput': False,
    'PrintWholePrompt': False,
    'PrintRawBingString': False,
    'PrintBingString': False,
    'UseCookies': False,
}
def input_modifier(string):
    """
    Webui hook applied to the user input before generation.

    Records the raw input for the Bing query, detects whether the
    activation word appears in it, mirrors the UI checkbox state from
    `params` into the module-level flags, and sets the status message
    shown while the bot "types". Returns the input unchanged.
    """
    global UserInput, BingOutput, RawBingString, ChosenWord
    global ShowBingString, OverwriteWord, PrintUserInput, PrintWholePrompt
    global PrintRawBingString, PrintBingString, UseCookies
    # Reset the Bing output shown in the webui for this round
    RawBingString = None
    UserInput = string
    # Find out if the chosen word appears in the sentence. re.escape keeps
    # activation words containing regex metacharacters (e.g. "C++") from
    # being interpreted as patterns, which previously raised re.error.
    BingOutput = re.search(re.escape(ChosenWord), UserInput)
    # Mirror every UI checkbox into its module-level flag in one pass
    # (replaces seven copy-pasted if/else blocks).
    ShowBingString = bool(params['ShowBingString'])
    OverwriteWord = bool(params['OverwriteWord'])
    PrintUserInput = bool(params['PrintUserInput'])
    PrintWholePrompt = bool(params['PrintWholePrompt'])
    PrintRawBingString = bool(params['PrintRawBingString'])
    PrintBingString = bool(params['PrintBingString'])
    UseCookies = bool(params['UseCookies'])
    if PrintUserInput:
        print("User input:\n", UserInput)
    # Bing will be queried when the activation word was found or when
    # OverwriteWord forces a search — show the matching status message.
    if BingOutput is not None or OverwriteWord:
        shared.processing_message = "*Is searching...*"
    else:
        shared.processing_message = "*Is typing...*"
    return string
# Prompt + BingString (if requested)
def custom_generate_chat_prompt(user_input, state, **kwargs):
    """
    Replacement for the webui's chat-prompt builder.

    Reproduces the stock prompt assembly (context, as much history as fits
    in the model's context window, the new user turn, the bot prefix) and,
    when the activation word was detected or OverwriteWord is on, queries
    EdgeGPT and appends its reply — wrapped in BingContext1/BingContext2 —
    to the prompt rows as extra "memory" for the character.
    """
    impersonate = kwargs.get('impersonate', False)
    _continue = kwargs.get('_continue', False)
    also_return_rows = kwargs.get('also_return_rows', False)
    history = kwargs.get('history', state['history'])['internal']
    is_instruct = state['mode'] == 'instruct'
    # Finding the maximum prompt size
    max_length = get_max_prompt_length(state)
    all_substrings = {
        'chat': get_turn_substrings(state, instruct=False),
        'instruct': get_turn_substrings(state, instruct=True)
    }
    substrings = all_substrings['instruct' if is_instruct else 'chat']
    # Create the template for "chat-instruct" mode
    if state['mode'] == 'chat-instruct':
        wrapper = ''
        command = state['chat-instruct_command'].replace('<|character|>', state['name2'] if not impersonate else state['name1'])
        wrapper += state['context_instruct']
        wrapper += all_substrings['instruct']['user_turn'].replace('<|user-message|>', command)
        wrapper += all_substrings['instruct']['bot_turn_stripped']
        if impersonate:
            wrapper += substrings['user_turn_stripped'].rstrip(' ')
        elif _continue:
            wrapper += apply_extensions('bot_prefix', substrings['bot_turn_stripped'], state)
            wrapper += history[-1][1]
        else:
            wrapper += apply_extensions('bot_prefix', substrings['bot_turn_stripped'].rstrip(' '), state)
    else:
        # Other modes splice the finished rows in via this placeholder.
        wrapper = '<|prompt|>'
    if is_instruct:
        context = state['context_instruct']
    else:
        context = replace_character_names(
            f"{state['context'].strip()}\n",
            state['name1'],
            state['name2']
        )
    # Build the prompt
    rows = [context]
    min_rows = 3
    i = len(history) - 1
    # Walk the history backwards, inserting turns (newest first, always at
    # index 1 so the context stays on top) until the encoded prompt fills up.
    while i >= 0 and get_encoded_length(wrapper.replace('<|prompt|>', ''.join(rows))) < max_length:
        if _continue and i == len(history) - 1:
            if state['mode'] != 'chat-instruct':
                rows.insert(1, substrings['bot_turn_stripped'] + history[i][1].strip())
        else:
            rows.insert(1, substrings['bot_turn'].replace('<|bot-message|>', history[i][1].strip()))
        string = history[i][0]
        if string not in ['', '<|BEGIN-VISIBLE-CHAT|>']:
            rows.insert(1, replace_all(substrings['user_turn'], {'<|user-message|>': string.strip(), '<|round|>': str(i)}))
        i -= 1
    if impersonate:
        if state['mode'] == 'chat-instruct':
            min_rows = 1
        else:
            min_rows = 2
            rows.append(substrings['user_turn_stripped'].rstrip(' '))
    elif not _continue:
        #------------------------------------------------------------
        # Add Bing output
        #------------------------------------------------------------
        async def EdgeGPT():
            """Query Bing once and store the cleaned reply in RawBingString."""
            global UserInput
            global RawBingString
            global PrintRawBingString
            global UseCookies
            # Map the user-chosen style name onto the EdgeGPT enum.
            # NOTE(review): an unrecognized style name leaves `style` unbound
            # and bot.ask() below raises UnboundLocalError — presumably the UI
            # only ever feeds the three supported values; verify.
            if BingConversationStyle=="creative":
                style = ConversationStyle.creative
            elif BingConversationStyle=="balanced":
                style = ConversationStyle.balanced
            elif BingConversationStyle=="precise":
                style = ConversationStyle.precise
            # Define and create one time the bot
            # NOTE(review): bot_created is a fresh local on every call, so this
            # guard is always taken and the bot is recreated for each query.
            bot_created=False
            if (bot_created==False):
                if UseCookies:
                    # NOTE(review): file handle from open() is never closed.
                    cookies = json.loads(open("./extensions/EdgeGPT/cookies.json", encoding="utf-8").read())
                    bot = await Chatbot.create(cookies=cookies)
                else:
                    bot = await Chatbot.create()
                bot_created=True
            response = await bot.ask(prompt=UserInput, conversation_style=style, simplify_response=True)
            # If required, end bot and create a new one
            # NOTE(review): this early path returns the fresh bot without
            # reading response["text"], so RawBingString stays None for this
            # round even though Bing answered — confirm that is intended.
            if response["messages_left"] < 2:
                print("WARNING: You are almost out of Bing messages! Recreating bot...")
                await bot.close()
                if UseCookies:
                    bot = await Chatbot.create(cookies=cookies)
                    return bot
                else:
                    bot = await Chatbot.create()
                    return bot
            # Select only the bot response from the response dictionary
            bot_response = response["text"] # You can also get citations via ["sources_text"]
            # Remove [^#^] citations in response
            bot_response_fixed = re.sub(r"\*", "", bot_response)
            # NOTE(review): non-raw string relies on '\[' etc. surviving as
            # literal backslashes; should be r'\[\^\d+\^\]' to silence
            # invalid-escape warnings on newer Pythons.
            RawBingString = re.sub('\[\^\d+\^\]', '', str(bot_response_fixed))
            await bot.close()
            return RawBingString
        # Different ways to run the same EdgeGPT function:
        # From chosen word
        if(BingOutput!=None) and not OverwriteWord:
            asyncio.run(EdgeGPT())
        # of from OverwriteWord button
        elif OverwriteWord:
            asyncio.run(EdgeGPT())
        # When Bing has given his answer we print (if requested) and save
        # the output
        # NOTE(review): `not ""` is always True and `and` binds tighter than
        # `or`, so this condition is really (RawBingString != None) or
        # OverwriteWord. When OverwriteWord is on but the query failed
        # (RawBingString still None), the concatenation on the next line
        # raises TypeError — probably meant simply `if RawBingString:`.
        if RawBingString != None and not "" or OverwriteWord==True:
            # Local BingString shadows the module-level one on purpose or not;
            # only this local copy is appended to the prompt rows.
            BingString=BingContext1 + RawBingString + "\n" + BingContext2
            if PrintUserInput:
                print("\nUser input:\n", UserInput)
            if PrintRawBingString:
                print("\nBing output:\n", RawBingString)
            if PrintBingString:
                print("\nBing context + Bing output:\n", BingString)
            # Add Bing output to character memory
            rows.append(BingString)
        # Add the user message
        if len(user_input) > 0:
            rows.append(replace_all(substrings['user_turn'], {'<|user-message|>': user_input.strip(), '<|round|>': str(len(history))}))
        # Add the character prefix
        if state['mode'] != 'chat-instruct':
            rows.append(apply_extensions('bot_prefix', substrings['bot_turn_stripped'].rstrip(' '), state))
    # Drop the oldest inserted turn until the encoded prompt fits the window
    # (never dropping below min_rows).
    while len(rows) > min_rows and get_encoded_length(wrapper.replace('<|prompt|>', ''.join(rows))) >= max_length:
        rows.pop(1)
    prompt = wrapper.replace('<|prompt|>', ''.join(rows))
    # NOTE(review): same precedence-buggy condition as above.
    if RawBingString != None and not "" or OverwriteWord==True:
        if PrintWholePrompt:
            print("\nWhole prompt:\n", prompt + "\n")
    if also_return_rows:
        return prompt, rows
    else:
        return prompt
def output_modifier(string):
    """
    Webui hook applied to the model outputs. When 'Show Bing Output' is
    enabled, the raw Bing reply is prepended to the model's answer;
    otherwise the text passes through untouched.
    """
    global BingOutput
    global RawBingString
    global ShowBingString
    if not ShowBingString:
        return string
    return "Bing:" + str(RawBingString) + "\n\n\n" + string
def bot_prefix_modifier(string):
    """
    Chat-mode hook for biasing the prefix of the bot's reply.
    This extension leaves the prefix untouched.
    """
    return string
def FunChooseWord(CustomWordRaw):
    """Store the textbox value as the new Bing activation word and echo it back."""
    global ChosenWord
    ChosenWord = CustomWordRaw
    return ChosenWord
def Context1Func(Context1Raw):
    """Store the textbox value as the text injected before the Bing output."""
    global BingContext1
    BingContext1 = Context1Raw
    return BingContext1
def Context2Func(Context2Raw):
    """Store the textbox value as the text injected after the Bing output."""
    global BingContext2
    BingContext2 = Context2Raw
    return BingContext2
def ConversationStyleFunc(ConversationStyleRaw):
    """Store the textbox value as the Bing conversation style and echo it back."""
    global BingConversationStyle
    BingConversationStyle = ConversationStyleRaw
    return BingConversationStyle
def ui():
    """Build the extension's Gradio panel and wire each control to its handler."""
    # Collapsible usage instructions.
    with gr.Accordion("Instructions", open=False):
        with gr.Box():
            gr.Markdown(
                """
                To use it, just start the prompt with Hey Bing; it doesn't start if you don't use uppercase and lowercase as in the example. You can change the activation word from EdgeGPT options. If the output is strange turn on Show Bing Output to see the result of Bing, maybe you need to correct your question.
                """)
    # Main options: output visibility, activation word, cookies, style.
    with gr.Accordion("EdgeGPT options", open=True):
        with gr.Row():
            ShowBingString = gr.Checkbox(value=params['ShowBingString'], label='Show Bing Output')
        with gr.Row():
            WordOption = gr.Textbox(label='Choose and use a word to activate Bing', placeholder="Choose your word. Empty = Hey Bing")
            OverwriteWord = gr.Checkbox(value=params['OverwriteWord'], label='Overwrite Activation Word. Bing will always search, ignoring the activation word.')
        with gr.Row():
            UseCookies = gr.Checkbox(value=params['UseCookies'], label='Use cookies. If you have login problems turn this on to use cookies (you need cookies.json). Instructions here: https://github.com/GiusTex/EdgeGPT/blob/main/how-to-use-cookies.md')
        with gr.Row():
            ConversationStyleOption = gr.Textbox(label='Choose Bing Conversation Style', placeholder="Supported Conversation Styles: creative, balanced, precise. Empty = default creative")
    # Text injected around the Bing reply (BingContext1 / BingContext2).
    with gr.Accordion("EdgeGPT context", open=False):
        with gr.Row():
            Context1Option = gr.Textbox(label='Choose Bing context-1', placeholder="First context, is injected before the Bing output. Empty = default context-1")
        with gr.Row():
            Context2Option = gr.Textbox(label='Choose Bing context-2', placeholder="Second context, is injected after the Bing output. Empty = default context-2")
        with gr.Row():
            gr.Markdown(
                """
                You can see the default context (with Bing output in the middle) by turning on the fourth option in "Print in console options": "Print Bing string in command console".
                """)
    # Console-logging toggles.
    with gr.Accordion("Print in console options", open=False):
        with gr.Row():
            PrintUserInput = gr.Checkbox(value=params['PrintUserInput'], label='Print User input in command console. The user input will be fed first to Bing, and then to the default bot.')
        with gr.Row():
            PrintWholePrompt = gr.Checkbox(value=params['PrintWholePrompt'], label='Print whole prompt in command console. Prompt has: context, Bing search output, and user input.')
        with gr.Row():
            PrintRawBingString = gr.Checkbox(value=params['PrintRawBingString'], label='Print Bing output in command console.')
        with gr.Row():
            PrintBingString = gr.Checkbox(value=params['PrintBingString'], label='Print Bing output + Bing context in command console.')
    # Event wiring: checkboxes update the shared `params` dict; textboxes go
    # through their setter functions which update the module-level globals.
    ShowBingString.change(lambda x: params.update({"ShowBingString": x}), ShowBingString, None)
    WordOption.change(fn=FunChooseWord, inputs=WordOption)
    OverwriteWord.change(lambda x: params.update({"OverwriteWord": x}), OverwriteWord, None)
    UseCookies.change(lambda x: params.update({"UseCookies": x}), UseCookies, None)
    ConversationStyleOption.change(fn=ConversationStyleFunc, inputs=ConversationStyleOption)
    Context1Option.change(fn=Context1Func, inputs=Context1Option)
    Context2Option.change(fn=Context2Func, inputs=Context2Option)
    PrintUserInput.change(lambda x: params.update({"PrintUserInput": x}), PrintUserInput, None)
    PrintWholePrompt.change(lambda x: params.update({"PrintWholePrompt": x}), PrintWholePrompt, None)
    PrintRawBingString.change(lambda x: params.update({"PrintRawBingString": x}), PrintRawBingString, None)
    PrintBingString.change(lambda x: params.update({"PrintBingString": x}), PrintBingString, None)