use a consistent warning prefix across codebase (#569)
cpacker authored Dec 4, 2023
1 parent aaccfb9 commit be08a74
Showing 8 changed files with 37 additions and 23 deletions.
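The change is mechanical across all eight files: warning strings that previously hard-coded "Warning: " or "WARNING: " are now built from a single CLI_WARNING_PREFIX constant defined in memgpt/constants.py. A minimal sketch of the pattern, using a hypothetical call site (only the constant and its import path come from this commit):

from memgpt.constants import CLI_WARNING_PREFIX  # CLI_WARNING_PREFIX == "Warning: "

def load_context_window(config):
    # Hypothetical helper; before this commit the prefix was hard-coded in each message,
    # e.g. print(f"WARNING: could not find context_window in config, ...").
    if config.context_window is None:
        print(f"{CLI_WARNING_PREFIX}could not find context_window in config, falling back to the default")
        return None
    return int(config.context_window)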
11 changes: 6 additions & 5 deletions memgpt/agent.py
@@ -18,6 +18,7 @@
CORE_MEMORY_HUMAN_CHAR_LIMIT,
CORE_MEMORY_PERSONA_CHAR_LIMIT,
LLM_MAX_TOKENS,
+ CLI_WARNING_PREFIX,
)
from .errors import LLMError
from .functions.functions import load_all_function_sets
@@ -505,7 +506,7 @@ def handle_ai_response(self, response_message):
heartbeat_request = function_args.pop("request_heartbeat", None)
if not (isinstance(heartbeat_request, bool) or heartbeat_request is None):
printd(
f"Warning: 'request_heartbeat' arg parsed was not a bool or None, type={type(heartbeat_request)}, value={heartbeat_request}"
f"{CLI_WARNING_PREFIX}'request_heartbeat' arg parsed was not a bool or None, type={type(heartbeat_request)}, value={heartbeat_request}"
)
heartbeat_request = None

@@ -568,7 +569,7 @@ def step(self, user_message, first_message=False, first_message_retry_limit=FIRS
input_message_sequence = self.messages

if len(input_message_sequence) > 1 and input_message_sequence[-1]["role"] != "user":
printd(f"WARNING: attempting to run ChatCompletion without user as the last message in the queue")
printd(f"{CLI_WARNING_PREFIX}Attempting to run ChatCompletion without user as the last message in the queue")

# Step 1: send the conversation and available functions to GPT
if not skip_verify and (first_message or self.messages_total == self.messages_total_init):
@@ -620,7 +621,7 @@ def step(self, user_message, first_message=False, first_message_retry_limit=FIRS
# We can't do summarize logic properly if context_window is undefined
if self.config.context_window is None:
# Fallback if for some reason context_window is missing, just set to the default
print(f"WARNING: could not find context_window in config, setting to default {LLM_MAX_TOKENS['DEFAULT']}")
print(f"{CLI_WARNING_PREFIX}could not find context_window in config, setting to default {LLM_MAX_TOKENS['DEFAULT']}")
print(f"{self.config}")
self.config.context_window = (
str(LLM_MAX_TOKENS[self.model])
@@ -629,7 +630,7 @@ def step(self, user_message, first_message=False, first_message_retry_limit=FIRS
)
if current_total_tokens > MESSAGE_SUMMARY_WARNING_FRAC * int(self.config.context_window):
printd(
f"WARNING: last response total_tokens ({current_total_tokens}) > {MESSAGE_SUMMARY_WARNING_FRAC * int(self.config.context_window)}"
f"{CLI_WARNING_PREFIX}last response total_tokens ({current_total_tokens}) > {MESSAGE_SUMMARY_WARNING_FRAC * int(self.config.context_window)}"
)
# Only deliver the alert if we haven't already (this period)
if not self.agent_alerted_about_memory_pressure:
@@ -712,7 +713,7 @@ def summarize_messages_inplace(self, cutoff=None, preserve_last_N_messages=True)
# We can't do summarize logic properly if context_window is undefined
if self.config.context_window is None:
# Fallback if for some reason context_window is missing, just set to the default
print(f"WARNING: could not find context_window in config, setting to default {LLM_MAX_TOKENS['DEFAULT']}")
print(f"{CLI_WARNING_PREFIX}could not find context_window in config, setting to default {LLM_MAX_TOKENS['DEFAULT']}")
print(f"{self.config}")
self.config.context_window = (
str(LLM_MAX_TOKENS[self.model])
8 changes: 5 additions & 3 deletions memgpt/autogen/interface.py
@@ -3,6 +3,8 @@

from colorama import Fore, Style, init

+ from memgpt.constants import CLI_WARNING_PREFIX

init(autoreset=True)


@@ -104,7 +106,7 @@ def user_message(self, msg, raw=False):
try:
msg_json = json.loads(msg)
except:
print(f"Warning: failed to parse user message into json")
print(f"{CLI_WARNING_PREFIX}failed to parse user message into json")
message = f"{Fore.GREEN}{Style.BRIGHT}🧑 {Fore.GREEN}{msg}{Style.RESET_ALL}" if self.fancy else f"[user] {msg}"
self.message_list.append(message)
return
@@ -175,7 +177,7 @@ def function_message(self, msg):
print(e)
message = msg_dict
else:
print(f"Warning: did not recognize function message")
print(f"{CLI_WARNING_PREFIX}did not recognize function message")
message = (
f"{Fore.RED}{Style.BRIGHT}⚡ [function] {Fore.RED}{msg}{Style.RESET_ALL}" if self.fancy else f"[function] {msg}"
)
@@ -194,7 +196,7 @@ def function_message(self, msg):
f"{Fore.GREEN}{Style.BRIGHT}⚡ [function] {Fore.GREEN}{msg}{Style.RESET_ALL}" if self.fancy else f"[function] {msg}"
)
except Exception:
print(f"Warning: did not recognize function message {type(msg)} {msg}")
print(f"{CLI_WARNING_PREFIX}did not recognize function message {type(msg)} {msg}")
message = f"{Fore.RED}{Style.BRIGHT}⚡ [function] {Fore.RED}{msg}{Style.RESET_ALL}" if self.fancy else f"[function] {msg}"

if message:
19 changes: 11 additions & 8 deletions memgpt/cli/cli.py
@@ -15,7 +15,7 @@
from memgpt.utils import printd
from memgpt.persistence_manager import LocalStateManager
from memgpt.config import MemGPTConfig, AgentConfig
- from memgpt.constants import MEMGPT_DIR
+ from memgpt.constants import MEMGPT_DIR, CLI_WARNING_PREFIX
from memgpt.agent import Agent
from memgpt.embeddings import embedding_model

@@ -107,36 +107,39 @@ def run(
# persistence_manager = LocalStateManager(agent_config).load() # TODO: implement load
# TODO: load prior agent state
if persona and persona != agent_config.persona:
typer.secho(f"Warning: Overriding existing persona {agent_config.persona} with {persona}", fg=typer.colors.YELLOW)
typer.secho(f"{CLI_WARNING_PREFIX}Overriding existing persona {agent_config.persona} with {persona}", fg=typer.colors.YELLOW)
agent_config.persona = persona
# raise ValueError(f"Cannot override {agent_config.name} existing persona {agent_config.persona} with {persona}")
if human and human != agent_config.human:
typer.secho(f"Warning: Overriding existing human {agent_config.human} with {human}", fg=typer.colors.YELLOW)
typer.secho(f"{CLI_WARNING_PREFIX}Overriding existing human {agent_config.human} with {human}", fg=typer.colors.YELLOW)
agent_config.human = human
# raise ValueError(f"Cannot override {agent_config.name} existing human {agent_config.human} with {human}")

# Allow overriding model specifics (model, model wrapper, model endpoint IP + type, context_window)
if model and model != agent_config.model:
typer.secho(f"Warning: Overriding existing model {agent_config.model} with {model}", fg=typer.colors.YELLOW)
typer.secho(f"{CLI_WARNING_PREFIX}Overriding existing model {agent_config.model} with {model}", fg=typer.colors.YELLOW)
agent_config.model = model
if context_window is not None and int(context_window) != agent_config.context_window:
typer.secho(
f"Warning: Overriding existing context window {agent_config.context_window} with {context_window}", fg=typer.colors.YELLOW
f"{CLI_WARNING_PREFIX}Overriding existing context window {agent_config.context_window} with {context_window}",
fg=typer.colors.YELLOW,
)
agent_config.context_window = context_window
if model_wrapper and model_wrapper != agent_config.model_wrapper:
typer.secho(
f"Warning: Overriding existing model wrapper {agent_config.model_wrapper} with {model_wrapper}", fg=typer.colors.YELLOW
f"{CLI_WARNING_PREFIX}Overriding existing model wrapper {agent_config.model_wrapper} with {model_wrapper}",
fg=typer.colors.YELLOW,
)
agent_config.model_wrapper = model_wrapper
if model_endpoint and model_endpoint != agent_config.model_endpoint:
typer.secho(
f"Warning: Overriding existing model endpoint {agent_config.model_endpoint} with {model_endpoint}", fg=typer.colors.YELLOW
f"{CLI_WARNING_PREFIX}Overriding existing model endpoint {agent_config.model_endpoint} with {model_endpoint}",
fg=typer.colors.YELLOW,
)
agent_config.model_endpoint = model_endpoint
if model_endpoint_type and model_endpoint_type != agent_config.model_endpoint_type:
typer.secho(
f"Warning: Overriding existing model endpoint type {agent_config.model_endpoint_type} with {model_endpoint_type}",
f"{CLI_WARNING_PREFIX}Overriding existing model endpoint type {agent_config.model_endpoint_type} with {model_endpoint_type}",
fg=typer.colors.YELLOW,
)
agent_config.model_endpoint_type = model_endpoint_type
2 changes: 2 additions & 0 deletions memgpt/constants.py
@@ -17,6 +17,8 @@
]
INITIAL_BOOT_MESSAGE_SEND_MESSAGE_FIRST_MSG = STARTUP_QUOTES[2]

+ CLI_WARNING_PREFIX = "Warning: "

# Constants to do with summarization / conversation length window
# The max amount of tokens supported by the underlying model (eg 8k for gpt-4 and Mistral 7B)
LLM_MAX_TOKENS = {
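Defining the prefix once also means downstream code and tests can match warnings reliably instead of guessing between "Warning: " and "WARNING: ". A small illustrative sketch (these helpers are hypothetical, not part of the commit):

from memgpt.constants import CLI_WARNING_PREFIX

def is_cli_warning(line: str) -> bool:
    # With one shared prefix, filtering CLI output for warnings is a plain prefix check.
    return line.startswith(CLI_WARNING_PREFIX)

def strip_cli_warning(line: str) -> str:
    # Drop the prefix to recover the bare warning message.
    return line[len(CLI_WARNING_PREFIX):] if is_cli_warning(line) else line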
2 changes: 1 addition & 1 deletion memgpt/functions/functions.py
@@ -5,7 +5,7 @@


from memgpt.functions.schema_generator import generate_schema
- from memgpt.constants import MEMGPT_DIR
+ from memgpt.constants import MEMGPT_DIR, CLI_WARNING_PREFIX

sys.path.append(os.path.join(MEMGPT_DIR, "functions"))

7 changes: 4 additions & 3 deletions memgpt/interface.py
@@ -5,6 +5,7 @@
from colorama import Fore, Style, init

from memgpt.utils import printd
+ from memgpt.constants import CLI_WARNING_PREFIX

init(autoreset=True)

@@ -108,7 +109,7 @@ def printd_user_message(icon, msg):
try:
msg_json = json.loads(msg)
except:
printd(f"Warning: failed to parse user message into json")
printd(f"{CLI_WARNING_PREFIX}failed to parse user message into json")
printd_user_message("🧑", msg)
return
if msg_json["type"] == "user_message":
@@ -186,7 +187,7 @@ def printd_function_message(icon, msg, color=Fore.RED):
printd(msg_dict)
pass
else:
printd(f"Warning: did not recognize function message")
printd(f"{CLI_WARNING_PREFIX}did not recognize function message")
printd_function_message("", msg)
else:
try:
@@ -196,7 +197,7 @@ def printd_function_message(icon, msg, color=Fore.RED):
else:
printd_function_message("", str(msg), color=Fore.RED)
except Exception:
print(f"Warning: did not recognize function message {type(msg)} {msg}")
print(f"{CLI_WARNING_PREFIX}did not recognize function message {type(msg)} {msg}")
printd_function_message("", msg)

@staticmethod
5 changes: 3 additions & 2 deletions memgpt/local_llm/chat_completion_proxy.py
@@ -18,6 +18,7 @@
from memgpt.local_llm.utils import get_available_wrappers
from memgpt.prompts.gpt_summarize import SYSTEM as SUMMARIZE_SYSTEM_MESSAGE
from memgpt.errors import LocalLLMConnectionError, LocalLLMError
+ from memgpt.constants import CLI_WARNING_PREFIX

DEBUG = False
# DEBUG = True
@@ -54,7 +55,7 @@ def get_chat_completion(
# Warn the user that we're using the fallback
if not has_shown_warning:
print(
f"Warning: no wrapper specified for local LLM, using the default wrapper (you can remove this warning by specifying the wrapper with --wrapper)"
f"{CLI_WARNING_PREFIX}no wrapper specified for local LLM, using the default wrapper (you can remove this warning by specifying the wrapper with --wrapper)"
)
has_shown_warning = True
if endpoint_type in ["koboldcpp", "llamacpp", "webui"]:
@@ -72,7 +73,7 @@ def get_chat_completion(
grammar_name = "json_func_calls_with_inner_thoughts"

if grammar_name is not None and endpoint_type not in ["koboldcpp", "llamacpp", "webui"]:
print(f"Warning: grammars are currently only supported when using llama.cpp as the MemGPT local LLM backend")
print(f"{CLI_WARNING_PREFIX}grammars are currently only supported when using llama.cpp as the MemGPT local LLM backend")

# First step: turn the message sequence into a prompt that the model expects
try:
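The wrapper-fallback hunk above also relies on a show-once flag (has_shown_warning), so the default-wrapper warning prints only on the first call. A rough sketch of that pattern under the new prefix (the flag and function names here are simplified stand-ins, not the module's actual state):

from memgpt.constants import CLI_WARNING_PREFIX

_has_shown_wrapper_warning = False  # module-level flag; reset only when the process restarts

def warn_default_wrapper_once():
    # Print the fallback warning at most once per process.
    global _has_shown_wrapper_warning
    if not _has_shown_wrapper_warning:
        print(f"{CLI_WARNING_PREFIX}no wrapper specified for local LLM, using the default wrapper")
        _has_shown_wrapper_warning = True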
6 changes: 5 additions & 1 deletion memgpt/openai_tools.py
@@ -9,6 +9,7 @@
from box import Box

from memgpt.local_llm.chat_completion_proxy import get_chat_completion
+ from memgpt.constants import CLI_WARNING_PREFIX


def is_context_overflow_error(exception):
@@ -294,7 +295,7 @@ def wrapper(*args, **kwargs):
delay *= exponential_base * (1 + jitter * random.random())

# Sleep for the delay
printd(f"Got a rate limit error ('{http_err}') on LLM backend request, waiting {int(delay)}s then retrying...")
# printd(f"Got a rate limit error ('{http_err}') on LLM backend request, waiting {int(delay)}s then retrying...")
print(
f"{CLI_WARNING_PREFIX}Got a rate limit error ('{http_err}') on LLM backend request, waiting {int(delay)}s then retrying..."
)
time.sleep(delay)
else:
# For other HTTP errors, re-raise the exception
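For context, the delay *= exponential_base * (1 + jitter * random.random()) line above is a standard exponential-backoff-with-jitter update: each retry multiplies the wait and adds a random factor so concurrent clients don't retry in lockstep. A self-contained sketch of that retry loop (function name, limits, and the error type are assumptions, not MemGPT's actual decorator):

import random
import time

from memgpt.constants import CLI_WARNING_PREFIX

class RateLimitError(Exception):
    # Stand-in for the backend's rate-limit error; the real wrapper inspects HTTP errors.
    pass

def call_with_exponential_backoff(call, max_retries=5, initial_delay=1.0, exponential_base=2.0, jitter=True):
    # Retry `call` on rate-limit errors, growing the wait (with jitter) after each failure.
    delay = initial_delay
    for _ in range(max_retries):
        try:
            return call()
        except RateLimitError as http_err:
            delay *= exponential_base * (1 + jitter * random.random())
            print(f"{CLI_WARNING_PREFIX}Got a rate limit error ('{http_err}'), waiting {int(delay)}s then retrying...")
            time.sleep(delay)
    return call()  # final attempt; any remaining error propagates to the caller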

