Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Allow MemGPT to read/write text files + make HTTP requests #174

Merged
merged 3 commits into from
Nov 1, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
73 changes: 72 additions & 1 deletion memgpt/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import pickle
import math
import os
import requests
import json
import threading

Expand Down Expand Up @@ -247,7 +248,6 @@ def init_avail_functions(self):
"edit_memory_append": self.edit_memory_append,
"edit_memory_replace": self.edit_memory_replace,
"pause_heartbeats": self.pause_heartbeats,
"message_chatgpt": self.message_chatgpt,
"core_memory_append": self.edit_memory_append,
"core_memory_replace": self.edit_memory_replace,
"recall_memory_search": self.recall_memory_search,
Expand All @@ -256,6 +256,10 @@ def init_avail_functions(self):
"conversation_search_date": self.recall_memory_search_date,
"archival_memory_insert": self.archival_memory_insert,
"archival_memory_search": self.archival_memory_search,
# extras
"read_from_text_file": self.read_from_text_file,
"append_to_text_file": self.append_to_text_file,
"http_request": self.http_request,
}

@property
Expand Down Expand Up @@ -797,6 +801,73 @@ def message_chatgpt(self, message):
reply = response.choices[0].message.content
return reply

def read_from_text_file(self, filename, line_start, num_lines=1, max_chars=500, trunc_message=True):
    """Read a window of lines from a text file.

    Args:
        filename (str): Path of the file to read.
        line_start (int): 1-indexed line number to start reading from.
        num_lines (int): Number of lines to read (defaults to 1).
        max_chars (int): Soft cap on the total characters returned; the
            read stops (truncating the final line) once it is reached.
        trunc_message (bool): If True, append a system-alert line when the
            max_chars cap cut the read short.

    Returns:
        str: The requested lines joined with newlines.

    Raises:
        FileNotFoundError: If `filename` does not exist.
        ValueError: If `line_start` or `num_lines` is not positive.
    """
    if not os.path.exists(filename):
        # Include the offending path so the caller can correct it
        # (the original f-string had lost its {filename} placeholder).
        raise FileNotFoundError(f"The file '{filename}' does not exist.")

    if line_start < 1 or num_lines < 1:
        raise ValueError("Both line_start and num_lines must be positive integers.")

    lines = []
    chars_read = 0
    with open(filename, "r") as file:
        # enumerate from 1 so line numbers match the 1-indexed line_start
        for current_line_number, line in enumerate(file, start=1):
            if line_start <= current_line_number < line_start + num_lines:
                chars_to_add = len(line)
                if max_chars is not None and chars_read + chars_to_add > max_chars:
                    # Adding this line would exceed max_chars: truncate it,
                    # optionally note the truncation, and stop reading.
                    excess_chars = (chars_read + chars_to_add) - max_chars
                    lines.append(line[:-excess_chars].rstrip("\n"))
                    if trunc_message:
                        lines.append(f"[SYSTEM ALERT - max chars ({max_chars}) reached during file read]")
                    break
                else:
                    lines.append(line.rstrip("\n"))
                chars_read += chars_to_add
            if current_line_number >= line_start + num_lines - 1:
                # Past the end of the requested window; no need to scan on.
                break

    return "\n".join(lines)

def append_to_text_file(self, filename, content):
    """Append `content` to an existing text file, followed by a newline.

    Args:
        filename (str): Path of the file to append to.
        content (str): Text to append.

    Raises:
        FileNotFoundError: If `filename` does not exist — this function
            deliberately refuses to create new files.
    """
    if not os.path.exists(filename):
        # Include the offending path in the error
        # (the original f-string had lost its {filename} placeholder).
        raise FileNotFoundError(f"The file '{filename}' does not exist.")

    with open(filename, "a") as file:
        file.write(content + "\n")

def http_request(self, method, url, payload_json=None):
    """Issue an HTTP request and return a summary of the response.

    Args:
        method (str): The HTTP method (e.g., 'GET', 'POST').
        url (str): The URL for the request.
        payload_json (str): A JSON string representing the request payload
            (ignored for GET requests).

    Returns:
        dict: On success, {'status_code', 'headers', 'body'}; on any
        failure, {'error': str} so the caller never sees an exception.
    """
    try:
        headers = {"Content-Type": "application/json"}

        if method.upper() == "GET":
            # GET requests carry no body; any supplied payload is ignored.
            print(f"[HTTP] launching GET request to {url}")
            response = requests.get(url, headers=headers)
        else:
            # Decode the JSON payload, defaulting to an empty object.
            payload = json.loads(payload_json) if payload_json else {}
            print(f"[HTTP] launching {method} request to {url}, payload=\n{json.dumps(payload, indent=2)}")
            response = requests.request(method, url, json=payload, headers=headers)

        return {"status_code": response.status_code, "headers": dict(response.headers), "body": response.text}
    except Exception as e:
        # Surface the failure as data rather than raising, so the agent
        # loop can report it back to the model.
        return {"error": str(e)}

def pause_heartbeats(self, minutes, max_pause=MAX_PAUSE_HEARTBEATS):
"""Pause timed heartbeats for N minutes"""
minutes = min(max_pause, minutes)
Expand Down
4 changes: 2 additions & 2 deletions memgpt/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -445,15 +445,15 @@ async def run_agent_loop(memgpt_agent, first, no_verify=False, cfg=None, legacy=
continue

elif user_input.lower() == "/dump":
await print_messages(memgpt_agent.messages)
await memgpt.interface.print_messages(memgpt_agent.messages)
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This patches a bug

continue

elif user_input.lower() == "/dumpraw":
await memgpt.interface.print_messages_raw(memgpt_agent.messages)
continue

elif user_input.lower() == "/dump1":
await print_messages(memgpt_agent.messages[-1])
await memgpt.interface.print_messages(memgpt_agent.messages[-1])
continue

elif user_input.lower() == "/memory":
Expand Down
38 changes: 37 additions & 1 deletion memgpt/presets.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def use_preset(preset_name, agent_config, model, persona, human, interface, pers
first_message_verify_mono=True if "gpt-4" in model else False,
)

if preset_name == "memgpt_chat_sync": # TODO: remove me after we move the CLI to AgentSync
elif preset_name == "memgpt_chat_sync": # TODO: remove me after we move the CLI to AgentSync
functions = [
"send_message",
"pause_heartbeats",
Expand Down Expand Up @@ -77,5 +77,41 @@ def use_preset(preset_name, agent_config, model, persona, human, interface, pers
first_message_verify_mono=True if "gpt-4" in model else False,
)

elif preset_name == "memgpt_extras":
functions = [
"send_message",
"pause_heartbeats",
"core_memory_append",
"core_memory_replace",
"conversation_search",
"conversation_search_date",
"archival_memory_insert",
"archival_memory_search",
# extra for read/write to files
"read_from_text_file",
"append_to_text_file",
# internet access
"http_request",
]
available_functions = [v for k, v in gpt_functions.FUNCTIONS_CHAINING.items() if k in functions]
printd(f"Available functions:\n", [x["name"] for x in available_functions])
assert len(functions) == len(available_functions)

if "gpt-3.5" in model:
# use a different system message for gpt-3.5
preset_name = "memgpt_gpt35_extralong"

return AgentAsync(
model=model,
system=gpt_system.get_system_text("memgpt_chat"),
functions=available_functions,
interface=interface,
persistence_manager=persistence_manager,
persona_notes=persona,
human_notes=human,
# gpt-3.5-turbo tends to omit inner monologue, relax this requirement for now
first_message_verify_mono=True if "gpt-4" in model else False,
)

else:
raise ValueError(preset_name)
74 changes: 74 additions & 0 deletions memgpt/prompts/gpt_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -235,4 +235,78 @@
"required": ["name", "query", "page", "request_heartbeat"],
},
},
"read_from_text_file": {
"name": "read_from_text_file",
"description": "Read lines from a text file.",
"parameters": {
"type": "object",
"properties": {
"filename": {
"type": "string",
"description": "The name of the file to read.",
},
"line_start": {
"type": "integer",
"description": "Line to start reading from.",
},
"num_lines": {
"type": "integer",
"description": "How many lines to read (defaults to 1).",
},
"request_heartbeat": {
"type": "boolean",
"description": FUNCTION_PARAM_DESCRIPTION_REQ_HEARTBEAT,
},
},
"required": ["filename", "line_start", "request_heartbeat"],
},
},
"append_to_text_file": {
"name": "append_to_text_file",
"description": "Append to a text file.",
"parameters": {
"type": "object",
"properties": {
"filename": {
"type": "string",
"description": "The name of the file to read.",
},
"content": {
"type": "string",
"description": "Content to append to the file.",
},
"request_heartbeat": {
"type": "boolean",
"description": FUNCTION_PARAM_DESCRIPTION_REQ_HEARTBEAT,
},
},
"required": ["filename", "content", "request_heartbeat"],
},
},
"http_request": {
"name": "http_request",
"description": "Generates an HTTP request and returns the response.",
"parameters": {
"type": "object",
"properties": {
"method": {
"type": "string",
"description": "The HTTP method (e.g., 'GET', 'POST').",
},
"url": {
"type": "string",
"description": "The URL for the request",
},
"payload": {
"type": "string",
"description": "A JSON string representing the request payload.",
},
"request_heartbeat": {
"type": "boolean",
"description": FUNCTION_PARAM_DESCRIPTION_REQ_HEARTBEAT,
},
},
"required": ["method", "url", "request_heartbeat"],
},
},
}
2 changes: 1 addition & 1 deletion memgpt/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ def chunk_file(file, tkns_per_chunk=300, model="gpt-4"):
encoding = tiktoken.encoding_for_model(model)

if file.endswith(".db"):
return # can't read the sqlite db this way, will get handled in main.py
return # can't read the sqlite db this way, will get handled in main.py

with open(file, "r") as f:
if file.endswith(".pdf"):
Expand Down
Loading