Skip to content

Commit

Permalink
add test
Browse files Browse the repository at this point in the history
  • Loading branch information
mattzh72 committed Nov 13, 2024
1 parent 19ce1be commit 1211ba2
Show file tree
Hide file tree
Showing 3 changed files with 50 additions and 10 deletions.
3 changes: 1 addition & 2 deletions letta/memory.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,6 @@ def _format_summary_history(message_history: List[Message]):
def summarize_messages(
agent_state: AgentState,
message_sequence_to_summarize: List[Message],
insert_acknowledgement_assistant_message: bool = True,
):
"""Summarize a message sequence using GPT"""
# we need the context_window
Expand All @@ -77,7 +76,7 @@ def summarize_messages(
)
message_sequence.append(Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role=MessageRole.user, text=summary_input))

llm_config_no_inner_thoughts = agent_state.llm_config.copy(deep=True)
llm_config_no_inner_thoughts = agent_state.llm_config.model_copy(deep=True)
llm_config_no_inner_thoughts.put_inner_thoughts_in_kwargs = False
response = create(
llm_config=llm_config_no_inner_thoughts,
Expand Down
36 changes: 35 additions & 1 deletion tests/helpers/endpoints_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@
FunctionCall,
Message,
)
from letta.utils import get_human_text, get_persona_text
from letta.utils import get_human_text, get_persona_text, json_dumps
from tests.helpers.utils import cleanup

# Generate uuid for agent name for this example
Expand Down Expand Up @@ -321,6 +321,40 @@ def check_agent_edit_core_memory(filename: str) -> LettaResponse:
return response


def check_agent_summarize_memory_simple(filename: str) -> LettaResponse:
    """
    Verify that an agent can summarize its in-context message history and
    still recall a fact (the friend's name) stated before summarization.
    """
    # Fresh client + clean slate for this agent name
    client = create_client()
    cleanup(client=client, agent_uuid=agent_uuid)
    agent_state = setup_agent(client, filename)

    friend_name = "Shub"
    # Seed the conversation; the middle message carries the fact to recall later
    seed_messages = (
        "Hey, how's it going? What do you think about this whole shindig",
        f"By the way, my friend's name is {friend_name}!",
        "Does the number 42 ring a bell?",
    )
    for seed in seed_messages:
        client.user_message(agent_id=agent_state.id, message=seed)

    # Force an in-place summarization of the agent's message buffer
    loaded_agent = client.server._get_or_load_agent(agent_id=agent_state.id)
    loaded_agent.summarize_messages_inplace()
    print(f"Summarization succeeded: messages[1] = \n\n{json_dumps(loaded_agent.messages[1])}\n")

    # The pre-summarization fact must survive the summarization
    response = client.user_message(agent_id=agent_state.id, message="What is my friend's name?")
    assert_sanity_checks(response)

    # The agent should repeat the friend's name back via send_message
    assert_invoked_send_message_with_keyword(response.messages, friend_name)

    # Inner monologue should still be produced and valid
    assert_inner_monologue_is_present_and_valid(response.messages)

    return response


def run_embedding_endpoint(filename):
# load JSON file
config_data = json.load(open(filename, "r"))
Expand Down
21 changes: 14 additions & 7 deletions tests/test_endpoints.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
check_agent_archival_memory_retrieval,
check_agent_edit_core_memory,
check_agent_recall_chat_memory,
check_agent_summarize_memory_simple,
check_agent_uses_external_tool,
check_first_response_is_valid_for_llm_endpoint,
check_response_contains_keyword,
Expand Down Expand Up @@ -58,56 +59,62 @@ def wrapper(*args, **kwargs):
# ======================================================================================================================
# OPENAI TESTS
# ======================================================================================================================
def test_openai_gpt_4_returns_valid_first_message():
def test_openai_gpt_4o_returns_valid_first_message():
    """gpt-4o: the agent's first response must be well-formed."""
    config_path = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_first_response_is_valid_for_llm_endpoint(config_path)
    # Log the successful response for debugging
    print(f"Got successful response from client: \n\n{response}")


def test_openai_gpt_4_returns_keyword():
def test_openai_gpt_4o_returns_keyword():
    """gpt-4o: the agent must echo a requested keyword in its reply."""
    config_path = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_response_contains_keyword(config_path, keyword="banana")
    # Log the successful response for debugging
    print(f"Got successful response from client: \n\n{response}")


def test_openai_gpt_4_uses_external_tool():
def test_openai_gpt_4o_uses_external_tool():
    """gpt-4o: the agent must successfully invoke an external tool."""
    config_path = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_agent_uses_external_tool(config_path)
    # Log the successful response for debugging
    print(f"Got successful response from client: \n\n{response}")


def test_openai_gpt_4_recall_chat_memory():
def test_openai_gpt_4o_recall_chat_memory():
    """gpt-4o: the agent must recall facts from earlier chat messages."""
    config_path = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_agent_recall_chat_memory(config_path)
    # Log the successful response for debugging
    print(f"Got successful response from client: \n\n{response}")


def test_openai_gpt_4_archival_memory_retrieval():
def test_openai_gpt_4o_archival_memory_retrieval():
    """gpt-4o: the agent must retrieve entries from archival memory."""
    config_path = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_agent_archival_memory_retrieval(config_path)
    # Log the successful response for debugging
    print(f"Got successful response from client: \n\n{response}")


def test_openai_gpt_4_archival_memory_insert():
def test_openai_gpt_4o_archival_memory_insert():
    """gpt-4o: the agent must insert entries into archival memory."""
    config_path = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_agent_archival_memory_insert(config_path)
    # Log the successful response for debugging
    print(f"Got successful response from client: \n\n{response}")


def test_openai_gpt_4_edit_core_memory():
def test_openai_gpt_4o_edit_core_memory():
    """gpt-4o: the agent must edit its core memory on request."""
    config_path = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_agent_edit_core_memory(config_path)
    # Log the successful response for debugging
    print(f"Got successful response from client: \n\n{response}")


def test_openai_gpt_4o_summarize_memory():
    """gpt-4o: the agent must summarize its messages and retain key facts."""
    config_path = os.path.join(llm_config_dir, "openai-gpt-4o.json")
    response = check_agent_summarize_memory_simple(config_path)
    # Log the successful response for debugging
    print(f"Got successful response from client: \n\n{response}")


def test_embedding_endpoint_openai():
    """Smoke-test the OpenAI embedding endpoint from its JSON config."""
    config_path = os.path.join(embedding_config_dir, "openai_embed.json")
    run_embedding_endpoint(config_path)
Expand Down

0 comments on commit 1211ba2

Please sign in to comment.