Merge pull request #117 from cpacker/cleanup
Cleanup
cpacker authored Oct 25, 2023
2 parents f7a9540 + 725d1bf commit 3e6ab1e
Showing 4 changed files with 465 additions and 7 deletions.
15 changes: 8 additions & 7 deletions memgpt/local_llm/chat_completion_proxy.py
@@ -5,12 +5,13 @@
 import json

 from .webui.api import get_webui_completion
-from .llm_chat_completion_wrappers import airoboros
+from .llm_chat_completion_wrappers import airoboros, dolphin
 from .utils import DotDict

 HOST = os.getenv("OPENAI_API_BASE")
 HOST_TYPE = os.getenv("BACKEND_TYPE")  # default None == ChatCompletion
 DEBUG = False
+DEFAULT_WRAPPER = airoboros.Airoboros21InnerMonologueWrapper()


 async def get_chat_completion(
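
For orientation, the proxy reads its backend endpoint from these two environment variables. A hypothetical local setup (the values below are illustrative, not taken from this diff):

import os

os.environ["OPENAI_API_BASE"] = "http://127.0.0.1:5000"  # read into HOST
os.environ["BACKEND_TYPE"] = "webui"  # read into HOST_TYPE; unset means ChatCompletion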
@@ -22,14 +23,14 @@ async def get_chat_completion(
     if function_call != "auto":
         raise ValueError(f"function_call == {function_call} not supported (auto only)")

-    if model == "airoboros_v2.1":
-        llm_wrapper = airoboros.Airoboros21Wrapper()
+    if model == "airoboros-l2-70b-2.1":
+        llm_wrapper = airoboros.Airoboros21InnerMonologueWrapper()
+    elif model == "dolphin-2.1-mistral-7b":
+        llm_wrapper = dolphin.Dolphin21MistralWrapper()
     else:
         # Warn the user that we're using the fallback
-        print(
-            f"Warning: could not find an LLM wrapper for {model}, using the airoboros wrapper"
-        )
-        llm_wrapper = airoboros.Airoboros21Wrapper()
+        print(f"Warning: no wrapper specified for local LLM, using the default wrapper")
+        llm_wrapper = DEFAULT_WRAPPER

     # First step: turn the message sequence into a prompt that the model expects
     prompt = llm_wrapper.chat_completion_to_prompt(messages, functions)
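For context, a minimal sketch of the new dispatch behavior (the helper name select_wrapper is hypothetical; the model strings, the warning text, and the DEFAULT_WRAPPER fallback all come from the hunk above):

from memgpt.local_llm.llm_chat_completion_wrappers import airoboros, dolphin

DEFAULT_WRAPPER = airoboros.Airoboros21InnerMonologueWrapper()

def select_wrapper(model):
    # Hypothetical helper mirroring the if/elif/else in the diff above.
    if model == "airoboros-l2-70b-2.1":
        return airoboros.Airoboros21InnerMonologueWrapper()
    elif model == "dolphin-2.1-mistral-7b":
        return dolphin.Dolphin21MistralWrapper()
    else:
        # Any unrecognized model now falls back to the shared default (an
        # inner-monologue airoboros wrapper) instead of constructing a bare
        # Airoboros21Wrapper inline as before this commit.
        print("Warning: no wrapper specified for local LLM, using the default wrapper")
        return DEFAULT_WRAPPER

# e.g. select_wrapper("some-other-model") prints the warning and returns DEFAULT_WRAPPER.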
213 changes: 213 additions & 0 deletions memgpt/local_llm/llm_chat_completion_wrappers/airoboros.py
@@ -150,6 +150,7 @@ def create_function_call(function_call):
         if self.include_opening_brace_in_prefix:
             prompt += "\n{"

+        print(prompt)
         return prompt

     def clean_function_args(self, function_name, function_args):
@@ -202,3 +203,215 @@ def output_to_chat_completion_response(self, raw_llm_output):
            },
        }
        return message


class Airoboros21InnerMonologueWrapper(Airoboros21Wrapper):
    """Still expect only JSON outputs from model, but add inner monologue as a field"""

    def __init__(
        self,
        simplify_json_content=True,
        clean_function_args=True,
        include_assistant_prefix=True,
        include_opening_brace_in_prefix=True,
        include_section_separators=True,
    ):
        self.simplify_json_content = simplify_json_content
        self.clean_func_args = clean_function_args
        self.include_assistant_prefix = include_assistant_prefix
        self.include_opening_brace_in_prefix = include_opening_brace_in_prefix
        self.include_section_separators = include_section_separators

    def chat_completion_to_prompt(self, messages, functions):
        """Example for airoboros: https://huggingface.co/jondurbin/airoboros-l2-70b-2.1#prompt-format

        A chat.
        USER: {prompt}
        ASSISTANT:

        Functions support: https://huggingface.co/jondurbin/airoboros-l2-70b-2.1#agentfunction-calling

        As an AI assistant, please select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format.

        Input: I want to know how many times 'Python' is mentioned in my text file.

        Available functions:
        file_analytics:
          description: This tool performs various operations on a text file.
          params:
            action: The operation we want to perform on the data, such as "count_occurrences", "find_line", etc.
            filters:
              keyword: The word or phrase we want to search for.

        OpenAI functions schema style:
        {
            "name": "send_message",
            "description": "Sends a message to the human user",
            "parameters": {
                "type": "object",
                "properties": {
                    # https://json-schema.org/understanding-json-schema/reference/array.html
                    "message": {
                        "type": "string",
                        "description": "Message contents. All unicode (including emojis) are supported.",
                    },
                },
                "required": ["message"],
            }
        },
        """
prompt = ""

# System insturctions go first
assert messages[0]["role"] == "system"
prompt += messages[0]["content"]

# Next is the functions preamble
def create_function_description(schema, add_inner_thoughts=True):
# airorobos style
func_str = ""
func_str += f"{schema['name']}:"
func_str += f"\n description: {schema['description']}"
func_str += f"\n params:"
if add_inner_thoughts:
func_str += (
f"\n inner_thoughts: Deep inner monologue private to you only."
)
for param_k, param_v in schema["parameters"]["properties"].items():
# TODO we're ignoring type
func_str += f"\n {param_k}: {param_v['description']}"
# TODO we're ignoring schema['parameters']['required']
return func_str

# prompt += f"\nPlease select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format."
prompt += f"\nPlease select the most suitable function and parameters from the list of available functions below, based on the ongoing conversation. Provide your response in JSON format."
prompt += f"\nAvailable functions:"
for function_dict in functions:
prompt += f"\n{create_function_description(function_dict)}"

        def create_function_call(function_call, inner_thoughts=None):
            """Go from ChatCompletion to Airoboros style function trace (in prompt)

            ChatCompletion data (inside message['function_call']):
                "function_call": {
                    "name": ...
                    "arguments": {
                        "arg1": val1,
                        ...
                    }

            Airoboros output:
                {
                    "function": "send_message",
                    "params": {
                        "message": "Hello there! I am Sam, an AI developed by Liminal Corp. How can I assist you today?"
                    }
                }
            """
            airo_func_call = {
                "function": function_call["name"],
                "params": {
                    "inner_thoughts": inner_thoughts,
                    **json.loads(function_call["arguments"]),
                },
            }
            return json.dumps(airo_func_call, indent=2)

        # Add a sep for the conversation
        if self.include_section_separators:
            prompt += "\n### INPUT"

        # Last are the user/assistant messages
        for message in messages[1:]:
            assert message["role"] in ["user", "assistant", "function"], message

            if message["role"] == "user":
                if self.simplify_json_content:
                    try:
                        content_json = json.loads(message["content"])
                        content_simple = content_json["message"]
                        prompt += f"\nUSER: {content_simple}"
                    except (json.JSONDecodeError, KeyError, TypeError):
                        # fall back to the raw content if it isn't MemGPT-style JSON
                        prompt += f"\nUSER: {message['content']}"
                else:
                    prompt += f"\nUSER: {message['content']}"
            elif message["role"] == "assistant":
                prompt += "\nASSISTANT:"
                # need to add the function call if there was one
                inner_thoughts = message["content"]
                if message.get("function_call"):
                    prompt += f"\n{create_function_call(message['function_call'], inner_thoughts=inner_thoughts)}"
            elif message["role"] == "function":
                # TODO find a good way to add this
                # prompt += f"\nASSISTANT: (function return) {message['content']}"
                prompt += f"\nFUNCTION RETURN: {message['content']}"
                continue
            else:
                raise ValueError(message)

        # Add a sep for the response
        if self.include_section_separators:
            prompt += "\n### RESPONSE"

        if self.include_assistant_prefix:
            prompt += "\nASSISTANT:"
            if self.include_opening_brace_in_prefix:
                prompt += "\n{"

        return prompt

    def clean_function_args(self, function_name, function_args):
        """Some basic MemGPT-specific cleaning of function args"""
        cleaned_function_name = function_name
        cleaned_function_args = function_args.copy()

        if function_name == "send_message":
            # strip request_heartbeat
            cleaned_function_args.pop("request_heartbeat", None)

        inner_thoughts = None
        if "inner_thoughts" in function_args:
            inner_thoughts = cleaned_function_args.pop("inner_thoughts")

        # TODO more cleaning to fix errors LLM makes
        return inner_thoughts, cleaned_function_name, cleaned_function_args

    def output_to_chat_completion_response(self, raw_llm_output):
        """Turn raw LLM output into a ChatCompletion style response with:
        "message" = {
            "role": "assistant",
            "content": ...,
            "function_call": {
                "name": ...
                "arguments": {
                    "arg1": val1,
                    ...
                }
            }
        }
        """
        if self.include_opening_brace_in_prefix and not raw_llm_output.startswith("{"):
            raw_llm_output = "{" + raw_llm_output

        try:
            function_json_output = json.loads(raw_llm_output)
        except Exception as e:
            raise Exception(f"Failed to decode JSON from LLM output:\n{raw_llm_output}") from e
        function_name = function_json_output["function"]
        function_parameters = function_json_output["params"]

        if self.clean_func_args:
            (
                inner_thoughts,
                function_name,
                function_parameters,
            ) = self.clean_function_args(function_name, function_parameters)
        else:
            # without arg cleaning there is no inner monologue to surface
            inner_thoughts = None

        message = {
            "role": "assistant",
            "content": inner_thoughts,
            "function_call": {
                "name": function_name,
                "arguments": json.dumps(function_parameters),
            },
        }
        return message
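
To make the round trip concrete, here is a usage sketch (the function schema is the send_message example from the docstring above; the message history and the raw model output are invented for illustration):

import json

from memgpt.local_llm.llm_chat_completion_wrappers.airoboros import (
    Airoboros21InnerMonologueWrapper,
)

wrapper = Airoboros21InnerMonologueWrapper()

functions = [
    {
        "name": "send_message",
        "description": "Sends a message to the human user",
        "parameters": {
            "type": "object",
            "properties": {
                "message": {
                    "type": "string",
                    "description": "Message contents. All unicode (including emojis) are supported.",
                },
            },
            "required": ["message"],
        },
    }
]
messages = [
    {"role": "system", "content": "You are MemGPT."},
    {"role": "user", "content": json.dumps({"message": "Hi there!"})},
]

# Renders: the system text, the function preamble, "### INPUT", "USER: Hi there!",
# "### RESPONSE", "ASSISTANT:", and an opening "{" for the model to continue.
prompt = wrapper.chat_completion_to_prompt(messages, functions)

# Pretend the model completed the opening brace like this (invented output):
raw_llm_output = """
"function": "send_message",
"params": {
  "inner_thoughts": "First contact with the user.",
  "message": "Hello! How can I help?",
  "request_heartbeat": false
}
}"""

# clean_function_args() moves inner_thoughts into the message content and strips
# request_heartbeat before the ChatCompletion-style dict is assembled.
response = wrapper.output_to_chat_completion_response(raw_llm_output)
assert response["content"] == "First contact with the user."
assert json.loads(response["function_call"]["arguments"]) == {"message": "Hello! How can I help?"}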
